diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 0103282ae3..c8c90dc26a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,7 +2,6 @@ name: Bug report about: Create a report to help us improve title: "[Bug][Module Name] Bug title " -labels: bug assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 4053490497..82d811e880 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -2,7 +2,6 @@ name: Feature request about: Suggest an idea for this project title: "[Feature][Module Name] Feature title" -labels: new feature assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/improvement_suggestion.md b/.github/ISSUE_TEMPLATE/improvement_suggestion.md index 9376e20418..544d98eae5 100644 --- a/.github/ISSUE_TEMPLATE/improvement_suggestion.md +++ b/.github/ISSUE_TEMPLATE/improvement_suggestion.md @@ -2,7 +2,6 @@ name: Improvement suggestion about: Improvement suggestion for this project title: "[Improvement][Module Name] Improvement title" -labels: improvement assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 6fdb06ea6c..961d82476e 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -2,7 +2,6 @@ name: Question about: Have a question wanted to be help title: "[Question] Question title" -labels: question assignees: '' --- diff --git a/.github/workflows/ci_backend.yml b/.github/workflows/ci_backend.yml index 7b245d77ef..e19336eab3 100644 --- a/.github/workflows/ci_backend.yml +++ b/.github/workflows/ci_backend.yml @@ -49,7 +49,7 @@ jobs: with: submodule: true - name: Check License Header - uses: apache/skywalking-eyes@9bd5feb + uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82 - name: Set up JDK 1.8 uses: actions/setup-java@v1 with: diff --git a/.github/workflows/ci_e2e.yml b/.github/workflows/ci_e2e.yml index 3e781056d0..cab1eeda61 100644 --- a/.github/workflows/ci_e2e.yml +++ b/.github/workflows/ci_e2e.yml @@ -33,7 +33,7 @@ jobs: with: submodule: true - name: Check License Header - uses: apache/skywalking-eyes@9bd5feb + uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82 - uses: actions/cache@v1 with: path: ~/.m2/repository diff --git a/.github/workflows/ci_ut.yml b/.github/workflows/ci_ut.yml index 09542d97f1..bb1e8c83a9 100644 --- a/.github/workflows/ci_ut.yml +++ b/.github/workflows/ci_ut.yml @@ -36,7 +36,7 @@ jobs: with: submodule: true - name: Check License Header - uses: apache/skywalking-eyes@9bd5feb + uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Only enable review / suggestion here - uses: actions/cache@v1 diff --git a/.gitignore b/.gitignore index 6717162b37..9011db1479 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,10 @@ .target .idea/ target/ +dist/ +all-dependencies.txt +self-modules.txt +third-party-dependencies.txt .settings .nbproject .classpath diff --git a/README.md b/README.md index ede405baa7..f306a398b5 100644 --- a/README.md +++ b/README.md @@ -7,46 +7,44 @@ Dolphin Scheduler Official Website [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache-dolphinscheduler&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache-dolphinscheduler) -> Dolphin Scheduler for Big Data - [![Stargazers over 
time](https://starchart.cc/apache/incubator-dolphinscheduler.svg)](https://starchart.cc/apache/incubator-dolphinscheduler) [![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md) [![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md) -### Design features: +### Design Features: -Dolphin Scheduler is a distributed and easy-to-extend visual DAG workflow scheduling system. It dedicates to solving the complex dependencies in data processing to make the scheduling system `out of the box` for the data processing process. +DolphinScheduler is a distributed and extensible workflow scheduler platform with powerful DAG visual interfaces, dedicated to solving complex job dependencies in the data pipeline and providing various types of jobs available `out of the box`. Its main objectives are as follows: - Associate the tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of the task in real-time. - - Support many task types: Shell, MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Sub_Process, Procedure, etc. - - Support process scheduling, dependency scheduling, manual scheduling, manual pause/stop/recovery, support for failed retry/alarm, recovery from specified nodes, Kill task, etc. - - Support the priority of process & task, task failover, and task timeout alarm or failure. - - Support process global parameters and node custom parameter settings. - - Support online upload/download of resource files, management, etc. Support online file creation and editing. - - Support task log online viewing and scrolling, online download log, etc. - - Implement cluster HA, decentralize Master cluster and Worker cluster through Zookeeper. + - Support various task types: Shell, MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Sub_Process, Procedure, etc. + - Support scheduling of workflows and dependencies, manual scheduling to pause/stop/recover task, support failure task retry/alarm, recover specified nodes from failure, kill task, etc. + - Support the priority of workflows & tasks, task failover, and task timeout alarm or failure. + - Support workflow global parameters and node customized parameter settings. + - Support online upload/download/management of resource files, etc. Support online file creation and editing. + - Support task log online viewing and scrolling and downloading, etc. + - Have implemented cluster HA, decentralize Master cluster and Worker cluster through Zookeeper. - Support the viewing of Master/Worker CPU load, memory, and CPU usage metrics. - - Support presenting tree or Gantt chart of workflow history as well as the statistics results of task & process status in each workflow. - - Support backfilling data. + - Support displaying workflow history in tree/Gantt chart, as well as statistical analysis on the task status & process status in each workflow. + - Support back-filling data. - Support multi-tenant. - Support internationalization. - - There are more waiting for partners to explore... + - More features waiting for partners to explore... -### What's in Dolphin Scheduler +### What's in DolphinScheduler Stability | Easy to use | Features | Scalability | -- | -- | -- | -- -Decentralized multi-master and multi-worker | Visualization process defines key information such as task status, task type, retry times, task running machine, visual variables, and so on at a glance.  
|  Support pause, recover operation | Support custom task types -HA is supported by itself | All process definition operations are visualized, dragging tasks to draw DAGs, configuring data sources and resources. At the same time, for third-party systems, the API mode operation is provided. | Users on Dolphin Scheduler can achieve many-to-one or one-to-one mapping relationship through tenants and Hadoop users, which is very important for scheduling large data jobs. | The scheduler uses distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster. Master and Worker support dynamic online and offline. -Overload processing: Overload processing: By using the task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured. Machine jam can be avoided with high tolerance to numbers of tasks cached in task queue. | One-click deployment | Support traditional shell tasks, and big data platform task scheduling: MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Procedure, Sub_Process | | +Decentralized multi-master and multi-worker | Visualization of workflow key information, such as task status, task type, retry times, task operation machine information, visual variables, and so on at a glance.  |  Support pause, recover operation | Support customized task types +support HA | Visualization of all workflow operations, dragging tasks to draw DAGs, configuring data sources and resources. At the same time, for third-party systems, provide API mode operations. | Users on DolphinScheduler can achieve many-to-one or one-to-one mapping relationship through tenants and Hadoop users, which is very important for scheduling large data jobs. | The scheduler supports distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster. Master and Worker support dynamic adjustment. +Overload processing: By using the task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured. Machine jam can be avoided with high tolerance to numbers of tasks cached in task queue. | One-click deployment | Support traditional shell tasks, and big data platform task scheduling: MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Procedure, Sub_Process | | -### System partial screenshot +### User Interface Screenshots ![home page](https://user-images.githubusercontent.com/15833811/75218288-bf286400-57d4-11ea-8263-d639c6511d5f.jpg) ![dag](https://user-images.githubusercontent.com/15833811/75236750-3374fe80-57f9-11ea-857d-62a66a5a559d.png) @@ -57,13 +55,9 @@ Overload processing: Overload processing: By using the task queue mechanism, the ![security](https://user-images.githubusercontent.com/15833811/75236441-bfd2f180-57f8-11ea-88bd-f24311e01b7e.png) ![treeview](https://user-images.githubusercontent.com/15833811/75217191-3fe56100-57d1-11ea-8856-f19180d9a879.png) +### QuickStart in Docker +Please referer the official website document:[[QuickStart in Docker](https://dolphinscheduler.apache.org/en-us/docs/1.3.4/user_doc/docker-deployment.html)] -### Recent R&D plan -The work plan of Dolphin Scheduler: [R&D plan](https://github.com/apache/incubator-dolphinscheduler/projects/1), which `In Develop` card shows the features that are currently being developed and TODO card lists what needs to be done(including feature ideas). 
- -### How to contribute - -Welcome to participate in contributing, please refer to this website to find out more: [[How to contribute](https://dolphinscheduler.apache.org/en-us/docs/development/contribute.html)] ### How to Build @@ -80,14 +74,16 @@ dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release ### Thanks -Dolphin Scheduler is based on a lot of excellent open-source projects, such as google guava, guice, grpc, netty, ali bonecp, quartz, and many open-source projects of Apache and so on. -We would like to express our deep gratitude to all the open-source projects which contribute to making the dream of Dolphin Scheduler comes true. We hope that we are not only the beneficiaries of open-source, but also give back to the community. Besides, we expect the partners who have the same passion and conviction to open-source will join in and contribute to the open-source community! - +DolphinScheduler is based on a lot of excellent open-source projects, such as google guava, guice, grpc, netty, ali bonecp, quartz, and many open-source projects of Apache and so on. +We would like to express our deep gratitude to all the open-source projects used in Dolphin Scheduler. We hope that we are not only the beneficiaries of open-source, but also give back to the community. Besides, we hope everyone who have the same enthusiasm and passion for open source could join in and contribute to the open-source community! ### Get Help -1. Submit an issue +1. Submit an [[issue](https://github.com/apache/incubator-dolphinscheduler/issues/new/choose)] 1. Subscribe to the mail list: https://dolphinscheduler.apache.org/en-us/docs/development/subscribe.html, then email dev@dolphinscheduler.apache.org +### How to Contribute +The community welcomes everyone to participate in contributing, please refer to this website to find out more: [[How to contribute](https://dolphinscheduler.apache.org/en-us/community/development/contribute.html)] + ### License Please refer to the [LICENSE](https://github.com/apache/incubator-dolphinscheduler/blob/dev/LICENSE) file. diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.3/configuration/dolphin-worker.xml b/ambari_plugin/common-services/DOLPHIN/1.3.3/configuration/dolphin-worker.xml index 1ae7a1a765..e5a3adf9f6 100644 --- a/ambari_plugin/common-services/DOLPHIN/1.3.3/configuration/dolphin-worker.xml +++ b/ambari_plugin/common-services/DOLPHIN/1.3.3/configuration/dolphin-worker.xml @@ -15,53 +15,64 @@ ~ limitations under the License. --> - - worker.exec.threads - 100 - - int - - worker execute thread num - - - - worker.heartbeat.interval - 10 - - int - - worker heartbeat interval - - - - worker.max.cpuload.avg - 100 - - int - - only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 - - - - worker.reserved.memory - 0.3 - only larger than reserved memory, worker server can work. default value : physical memory * 1/10, unit is G. - - - - - worker.listen.port - 1234 - - int - - worker listen port - - - - worker.groups - default - default worker group - - + + worker.exec.threads + 100 + + int + + worker execute thread num + + + + worker.heartbeat.interval + 10 + + int + + worker heartbeat interval + + + + worker.max.cpuload.avg + 100 + + int + + only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 + + + + + worker.reserved.memory + 0.3 + only larger than reserved memory, worker server can work. default value : physical memory * 1/10, + unit is G. 
+ + + + + worker.listen.port + 1234 + + int + + worker listen port + + + + worker.groups + default + default worker group + + + + worker.weigth + 100 + + int + + worker weight + + \ No newline at end of file diff --git a/docker/build/Dockerfile b/docker/build/Dockerfile index 7b0f09c539..a33cf490b1 100644 --- a/docker/build/Dockerfile +++ b/docker/build/Dockerfile @@ -15,55 +15,37 @@ # limitations under the License. # -FROM nginx:alpine +FROM openjdk:8-jdk-alpine ARG VERSION ENV TZ Asia/Shanghai ENV LANG C.UTF-8 -ENV DEBIAN_FRONTEND noninteractive +ENV DOCKER true -#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip tini kazoo. -#If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example: -#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories +# 1. install command/library/software +# If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example: +# RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories +# RUN sed -i 's/dl-cdn.alpinelinux.org/mirror.tuna.tsinghua.edu.cn/g' /etc/apk/repositories RUN apk update && \ - apk add --update --no-cache dos2unix shadow bash openrc python2 python3 sudo vim wget iputils net-tools openssh-server py-pip tini && \ - apk add --update --no-cache procps && \ - openrc boot && \ - pip install kazoo + apk add --no-cache tzdata dos2unix bash python2 python3 procps sudo shadow tini postgresql-client && \ + cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ + apk del tzdata && \ + rm -rf /var/cache/apk/* -#2. install jdk -RUN apk add --update --no-cache openjdk8 -ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk -ENV PATH $JAVA_HOME/bin:$PATH - -#3. add dolphinscheduler +# 2. add dolphinscheduler ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz /opt/ -RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/ +RUN ln -s /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin /opt/dolphinscheduler ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler -#4. install database, if use mysql as your backend database, the `mysql-client` package should be installed -RUN apk add --update --no-cache postgresql postgresql-contrib - -#5. modify nginx -RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \ - rm -rf /etc/nginx/conf.d/* -COPY ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d - -#6. add configuration and modify permissions and set soft links +# 3. 
add configuration and modify permissions and set soft links COPY ./checkpoint.sh /root/checkpoint.sh COPY ./startup-init-conf.sh /root/startup-init-conf.sh COPY ./startup.sh /root/startup.sh COPY ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/ COPY ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/ -COPY conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/ -RUN chmod +x /root/checkpoint.sh && \ - chmod +x /root/startup-init-conf.sh && \ - chmod +x /root/startup.sh && \ - chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \ - chmod +x /opt/dolphinscheduler/script/*.sh && \ - chmod +x /opt/dolphinscheduler/bin/*.sh && \ - dos2unix /root/checkpoint.sh && \ +COPY ./conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/ +RUN dos2unix /root/checkpoint.sh && \ dos2unix /root/startup-init-conf.sh && \ dos2unix /root/startup.sh && \ dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \ @@ -71,13 +53,10 @@ RUN chmod +x /root/checkpoint.sh && \ dos2unix /opt/dolphinscheduler/bin/*.sh && \ rm -rf /bin/sh && \ ln -s /bin/bash /bin/sh && \ - mkdir -p /tmp/xls && \ - #7. remove apk index cache and disable coredup for sudo - rm -rf /var/cache/apk/* && \ + mkdir -p /var/mail /tmp/xls && \ echo "Set disable_coredump false" >> /etc/sudo.conf - -#8. expose port -EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888 +# 4. expose port +EXPOSE 5678 1234 12345 50051 ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"] diff --git a/docker/build/README.md b/docker/build/README.md index 951f2d6b51..3a2a1666bb 100644 --- a/docker/build/README.md +++ b/docker/build/README.md @@ -15,9 +15,9 @@ Official Website: https://dolphinscheduler.apache.org #### You can start a dolphinscheduler instance ``` -$ docker run -dit --name dolphinscheduler \ +$ docker run -dit --name dolphinscheduler \ -e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \ --p 8888:8888 \ +-p 12345:12345 \ dolphinscheduler all ``` @@ -33,7 +33,7 @@ You can specify **existing postgres service**. Example: $ docker run -dit --name dolphinscheduler \ -e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \ -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \ --p 8888:8888 \ +-p 12345:12345 \ dolphinscheduler all ``` @@ -43,7 +43,7 @@ You can specify **existing zookeeper service**. Example: $ docker run -dit --name dolphinscheduler \ -e ZOOKEEPER_QUORUM="l92.168.x.x:2181" -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \ --p 8888:8888 \ +-p 12345:12345 \ dolphinscheduler all ``` @@ -90,14 +90,6 @@ $ docker run -dit --name dolphinscheduler \ dolphinscheduler alert-server ``` -* Start a **frontend**, For example: - -``` -$ docker run -dit --name dolphinscheduler \ --e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \ --p 8888:8888 \ -dolphinscheduler frontend -``` **Note**: You must be specify `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM` when start a standalone dolphinscheduler server. @@ -146,7 +138,7 @@ This environment variable sets the host for database. The default value is `127. This environment variable sets the port for database. The default value is `5432`. -**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`. 
+**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`. **`DATABASE_USERNAME`** @@ -306,18 +298,6 @@ This environment variable sets enterprise wechat agent id for `alert-server`. Th This environment variable sets enterprise wechat users for `alert-server`. The default value is empty. -**`FRONTEND_API_SERVER_HOST`** - -This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`. - -**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `api-server`. - -**`FRONTEND_API_SERVER_PORT`** - -This environment variable sets api server port for `frontend`. The default value is `123451`. - -**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `api-server`. - ## Initialization scripts If you would like to do additional initialization in an image derived from this one, add one or more environment variable under `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`. @@ -326,7 +306,7 @@ For example, to add an environment variable `API_SERVER_PORT` in `/root/start-in ``` export API_SERVER_PORT=5555 -``` +``` and to modify `/opt/dolphinscheduler/conf/application-api.properties.tpl` template file, add server port: ``` @@ -343,8 +323,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) EOF " > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} done - -echo "generate nginx config" -sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf -sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf ``` diff --git a/docker/build/README_zh_CN.md b/docker/build/README_zh_CN.md index c4339a945c..ffa0020dcd 100644 --- a/docker/build/README_zh_CN.md +++ b/docker/build/README_zh_CN.md @@ -15,9 +15,9 @@ Official Website: https://dolphinscheduler.apache.org #### 你可以运行一个dolphinscheduler实例 ``` -$ docker run -dit --name dolphinscheduler \ +$ docker run -dit --name dolphinscheduler \ -e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \ --p 8888:8888 \ +-p 12345:12345 \ dolphinscheduler all ``` @@ -33,7 +33,7 @@ dolphinscheduler all $ docker run -dit --name dolphinscheduler \ -e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \ -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \ --p 8888:8888 \ +-p 12345:12345 \ dolphinscheduler all ``` @@ -43,7 +43,7 @@ dolphinscheduler all $ docker run -dit --name dolphinscheduler \ -e ZOOKEEPER_QUORUM="l92.168.x.x:2181" -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \ --p 8888:8888 \ +-p 12345:12345 \ dolphinscheduler all ``` @@ -90,15 +90,6 @@ $ docker run -dit --name dolphinscheduler \ dolphinscheduler alert-server ``` -* 启动一个 **frontend**, 如下: - -``` -$ docker run -dit --name dolphinscheduler \ --e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \ --p 8888:8888 \ -dolphinscheduler frontend -``` - **注意**: 当你运行dolphinscheduler中的部分服务时,你必须指定这些环境变量 `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM`。 ## 如何构建一个docker镜像 @@ -306,18 +297,6 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些 配置`alert-server`的邮件服务企业微信`USERS`,默认值 `空`。 -**`FRONTEND_API_SERVER_HOST`** - -配置`frontend`的连接`api-server`的地址,默认值 `127.0.0.1`。 - -**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。 - 
-**`FRONTEND_API_SERVER_PORT`** - -配置`frontend`的连接`api-server`的端口,默认值 `12345`。 - -**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。 - ## 初始化脚本 如果你想在编译的时候或者运行的时候附加一些其它的操作及新增一些环境变量,你可以在`/root/start-init-conf.sh`文件中进行修改,同时如果涉及到配置文件的修改,请在`/opt/dolphinscheduler/conf/*.tpl`中修改相应的配置文件 @@ -326,7 +305,7 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些 ``` export API_SERVER_PORT=5555 -``` +``` 当添加以上环境变量后,你应该在相应的模板文件`/opt/dolphinscheduler/conf/application-api.properties.tpl`中添加这个环境变量配置: ``` @@ -343,8 +322,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) EOF " > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} done - -echo "generate nginx config" -sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf -sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf ``` diff --git a/docker/build/checkpoint.sh b/docker/build/checkpoint.sh old mode 100644 new mode 100755 diff --git a/docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh b/docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh old mode 100644 new mode 100755 index 78c8f98bc1..7fd39335ae --- a/docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh +++ b/docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh @@ -15,6 +15,14 @@ # limitations under the License. # -export PYTHON_HOME=/usr/bin/python2 +export HADOOP_HOME=/opt/soft/hadoop +export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop +export SPARK_HOME1=/opt/soft/spark1 +export SPARK_HOME2=/opt/soft/spark2 +export PYTHON_HOME=/usr/bin/python export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk -export PATH=$PYTHON_HOME/bin:$JAVA_HOME/bin:$PATH +export HIVE_HOME=/opt/soft/hive +export FLINK_HOME=/opt/soft/flink +export DATAX_HOME=/opt/soft/datax/bin/datax.py + +export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH diff --git a/docker/build/conf/dolphinscheduler/logback/logback-alert.xml b/docker/build/conf/dolphinscheduler/logback/logback-alert.xml index 1718947dd1..eec78385db 100644 --- a/docker/build/conf/dolphinscheduler/logback/logback-alert.xml +++ b/docker/build/conf/dolphinscheduler/logback/logback-alert.xml @@ -20,14 +20,6 @@ - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - ${log.base}/dolphinscheduler-alert.log @@ -45,7 +37,6 @@ - diff --git a/docker/build/conf/dolphinscheduler/logback/logback-api.xml b/docker/build/conf/dolphinscheduler/logback/logback-api.xml index 2df90d8392..6d29f8af5f 100644 --- a/docker/build/conf/dolphinscheduler/logback/logback-api.xml +++ b/docker/build/conf/dolphinscheduler/logback/logback-api.xml @@ -20,14 +20,6 @@ - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - @@ -55,7 +47,6 @@ - diff --git a/docker/build/conf/dolphinscheduler/logback/logback-master.xml b/docker/build/conf/dolphinscheduler/logback/logback-master.xml index 7410c01f05..d1bfb67aa1 100644 --- a/docker/build/conf/dolphinscheduler/logback/logback-master.xml +++ b/docker/build/conf/dolphinscheduler/logback/logback-master.xml @@ -20,14 +20,6 @@ - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - @@ -74,7 +66,6 @@ - diff --git a/docker/build/conf/dolphinscheduler/logback/logback-worker.xml b/docker/build/conf/dolphinscheduler/logback/logback-worker.xml index be1d0acde5..b7e08dd846 100644 --- a/docker/build/conf/dolphinscheduler/logback/logback-worker.xml +++ 
b/docker/build/conf/dolphinscheduler/logback/logback-worker.xml @@ -20,14 +20,6 @@ - - - - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n - - UTF-8 - - - diff --git a/docker/build/conf/nginx/dolphinscheduler.conf b/docker/build/conf/nginx/dolphinscheduler.conf deleted file mode 100644 index a594385a0e..0000000000 --- a/docker/build/conf/nginx/dolphinscheduler.conf +++ /dev/null @@ -1,51 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -server { - listen 8888; - server_name localhost; - #charset koi8-r; - #access_log /var/log/nginx/host.access.log main; - location / { - root /opt/dolphinscheduler/ui; - index index.html index.html; - } - location /dolphinscheduler/ui{ - alias /opt/dolphinscheduler/ui; - } - location /dolphinscheduler { - proxy_pass http://FRONTEND_API_SERVER_HOST:FRONTEND_API_SERVER_PORT; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header x_real_ipP $remote_addr; - proxy_set_header remote_addr $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_http_version 1.1; - proxy_connect_timeout 300s; - proxy_read_timeout 300s; - proxy_send_timeout 300s; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - } - #error_page 404 /404.html; - # redirect server error pages to the static page /50x.html - # - error_page 500 502 503 504 /50x.html; - location = /50x.html { - root /usr/share/nginx/html; - } -} diff --git a/docker/build/conf/zookeeper/zoo.cfg b/docker/build/conf/zookeeper/zoo.cfg deleted file mode 100644 index 94f92d0620..0000000000 --- a/docker/build/conf/zookeeper/zoo.cfg +++ /dev/null @@ -1,47 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# The number of milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=10 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=5 -# the directory where the snapshot is stored. 
-# do not use /tmp for storage, /tmp here is just -# example sakes. -dataDir=/tmp/zookeeper -# the port at which the clients will connect -clientPort=2181 -# the maximum number of client connections. -# increase this if you need to handle more clients -#maxClientCnxns=60 -# -# Be sure to read the maintenance section of the -# administrator guide before turning on autopurge. -# -# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance -# -# The number of snapshots to retain in dataDir -#autopurge.snapRetainCount=3 -# Purge task interval in hours -# Set to "0" to disable auto purge feature -#autopurge.purgeInterval=1 -#Four Letter Words commands:stat,ruok,conf,isro -4lw.commands.whitelist=* diff --git a/docker/build/hooks/build b/docker/build/hooks/build old mode 100644 new mode 100755 index 6362fdd299..ce7362bf55 --- a/docker/build/hooks/build +++ b/docker/build/hooks/build @@ -48,7 +48,12 @@ echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubati mv "$(pwd)"/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-"${VERSION}"-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/ # docker build -echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/\n" -sudo docker build --build-arg VERSION="${VERSION}" -t $DOCKER_REPO:"${VERSION}" "$(pwd)/docker/build/" +BUILD_COMMAND="docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/" +echo -e "$BUILD_COMMAND\n" +if (docker info 2> /dev/null | grep -i "ERROR"); then + sudo $BUILD_COMMAND +else + $BUILD_COMMAND +fi echo "------ dolphinscheduler end - build -------" diff --git a/docker/build/hooks/push b/docker/build/hooks/push old mode 100644 new mode 100755 diff --git a/docker/build/startup-init-conf.sh b/docker/build/startup-init-conf.sh old mode 100644 new mode 100755 index d5cd86f1a4..c3aadcd673 --- a/docker/build/startup-init-conf.sh +++ b/docker/build/startup-init-conf.sh @@ -39,9 +39,9 @@ export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"} export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"} export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"} export DOLPHINSCHEDULER_OPTS=${DOLPHINSCHEDULER_OPTS:-""} -export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"NONE"} -export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/ds"} -export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"s3a://xxxx"} +export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"HDFS"} +export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/dolphinscheduler"} +export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"file:///"} export FS_S3A_ENDPOINT=${FS_S3A_ENDPOINT:-"s3.xxx.amazonaws.com"} export FS_S3A_ACCESS_KEY=${FS_S3A_ACCESS_KEY:-"xxxxxxx"} export FS_S3A_SECRET_KEY=${FS_S3A_SECRET_KEY:-"xxxxxxx"} @@ -81,7 +81,7 @@ export WORKER_WEIGHT=${WORKER_WEIGHT:-"100"} #============================================================================ # alert plugin dir export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"/opt/dolphinscheduler"} -# XLS FILE +# xls file export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"} # mail export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""} @@ -99,12 +99,6 @@ export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""} export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""} export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""} -#============================================================================ -# Frontend 
-#============================================================================ -export FRONTEND_API_SERVER_HOST=${FRONTEND_API_SERVER_HOST:-"127.0.0.1"} -export FRONTEND_API_SERVER_PORT=${FRONTEND_API_SERVER_PORT:-"12345"} - echo "generate app config" ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do eval "cat << EOF @@ -112,7 +106,3 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line}) EOF " > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*} done - -echo "generate nginx config" -sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf -sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf \ No newline at end of file diff --git a/docker/build/startup.sh b/docker/build/startup.sh old mode 100644 new mode 100755 index 0511788d48..8bd1895cc0 --- a/docker/build/startup.sh +++ b/docker/build/startup.sh @@ -22,8 +22,8 @@ DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs -# start database -initDatabase() { +# wait database +waitDatabase() { echo "test ${DATABASE_TYPE} service" while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do counter=$((counter+1)) @@ -43,19 +43,22 @@ initDatabase() { exit 1 fi else - v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1") + v=$(PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1") if [ "$(echo ${v} | grep 'FATAL' | wc -l)" -eq 1 ]; then echo "Error: Can't connect to database...${v}" exit 1 fi fi +} +# init database +initDatabase() { echo "import sql data" ${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh } -# start zk -initZK() { +# wait zk +waitZK() { echo "connect remote zookeeper" echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do while ! nc -z ${line%:*} ${line#*:}; do @@ -70,12 +73,6 @@ initZK() { done } -# start nginx -initNginx() { - echo "start nginx" - nginx & -} - # start master-server initMasterServer() { echo "start master-server" @@ -115,59 +112,54 @@ initAlertServer() { printUsage() { echo -e "Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system," echo -e "dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.\n" - echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server | frontend ]\n" - printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server, alert-server and frontend." + echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server ]\n" + printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server and alert-server" printf "%-13s: %s\n" "master-server" "MasterServer is mainly responsible for DAG task split, task submission monitoring." - printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services.." - printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests from the front-end UI layer." + printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services." 
+ printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests and providing the front-end UI layer." printf "%-13s: %s\n" "alert-server" "AlertServer mainly include Alarms." - printf "%-13s: %s\n" "frontend" "Frontend mainly provides various visual operation interfaces of the system." } # init config file source /root/startup-init-conf.sh -LOGFILE=/var/log/nginx/access.log case "$1" in (all) - initZK + waitZK + waitDatabase initDatabase initMasterServer initWorkerServer initApiServer initAlertServer initLoggerServer - initNginx - LOGFILE=/var/log/nginx/access.log + LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log ;; (master-server) - initZK - initDatabase + waitZK + waitDatabase initMasterServer LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log ;; (worker-server) - initZK - initDatabase + waitZK + waitDatabase initWorkerServer initLoggerServer LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log ;; (api-server) - initZK + waitZK + waitDatabase initDatabase initApiServer LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log ;; (alert-server) - initDatabase + waitDatabase initAlertServer LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log ;; - (frontend) - initNginx - LOGFILE=/var/log/nginx/access.log - ;; (help) printUsage exit 1 @@ -179,8 +171,7 @@ case "$1" in esac # init directories and log files -mkdir -p ${DOLPHINSCHEDULER_LOGS} && mkdir -p /var/log/nginx/ && cat /dev/null >> ${LOGFILE} +mkdir -p ${DOLPHINSCHEDULER_LOGS} && cat /dev/null >> ${LOGFILE} echo "tail begin" exec bash -c "tail -n 1 -f ${LOGFILE}" - diff --git a/docker/docker-swarm/check b/docker/docker-swarm/check old mode 100644 new mode 100755 index 59203c5b3e..f4093664da --- a/docker/docker-swarm/check +++ b/docker/docker-swarm/check @@ -25,7 +25,7 @@ else echo "Server start failed "$server_num exit 1 fi -ready=`curl http://127.0.0.1:8888/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l` +ready=`curl http://127.0.0.1:12345/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l` if [ $ready -eq 1 ] then echo "Servers is ready" diff --git a/docker/docker-swarm/docker-compose.yml b/docker/docker-swarm/docker-compose.yml index 349b3ad790..04a9bc556d 100644 --- a/docker/docker-swarm/docker-compose.yml +++ b/docker/docker-swarm/docker-compose.yml @@ -31,6 +31,7 @@ services: volumes: - dolphinscheduler-postgresql:/bitnami/postgresql - dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d + restart: unless-stopped networks: - dolphinscheduler @@ -45,13 +46,14 @@ services: ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons volumes: - dolphinscheduler-zookeeper:/bitnami/zookeeper + restart: unless-stopped networks: - dolphinscheduler dolphinscheduler-api: image: apache/dolphinscheduler:latest container_name: dolphinscheduler-api - command: ["api-server"] + command: api-server ports: - 12345:12345 environment: @@ -62,6 +64,9 @@ services: DATABASE_PASSWORD: root DATABASE_DATABASE: dolphinscheduler ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + RESOURCE_STORAGE_TYPE: HDFS + RESOURCE_UPLOAD_PATH: /dolphinscheduler + FS_DEFAULT_FS: file:/// healthcheck: test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"] interval: 30s @@ -72,37 +77,16 @@ services: - dolphinscheduler-postgresql - dolphinscheduler-zookeeper volumes: - - ./dolphinscheduler-logs:/opt/dolphinscheduler/logs - networks: - - dolphinscheduler - - 
dolphinscheduler-frontend: - image: apache/dolphinscheduler:latest - container_name: dolphinscheduler-frontend - command: ["frontend"] - ports: - - 8888:8888 - environment: - TZ: Asia/Shanghai - FRONTEND_API_SERVER_HOST: dolphinscheduler-api - FRONTEND_API_SERVER_PORT: 12345 - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "8888"] - interval: 30s - timeout: 5s - retries: 3 - start_period: 30s - depends_on: - - dolphinscheduler-api - volumes: - - ./dolphinscheduler-logs:/var/log/nginx + - dolphinscheduler-logs:/opt/dolphinscheduler/logs + - dolphinscheduler-resource-local:/dolphinscheduler + restart: unless-stopped networks: - dolphinscheduler dolphinscheduler-alert: image: apache/dolphinscheduler:latest container_name: dolphinscheduler-alert - command: ["alert-server"] + command: alert-server environment: TZ: Asia/Shanghai XLS_FILE_PATH: "/tmp/xls" @@ -133,14 +117,15 @@ services: depends_on: - dolphinscheduler-postgresql volumes: - - ./dolphinscheduler-logs:/opt/dolphinscheduler/logs + - dolphinscheduler-logs:/opt/dolphinscheduler/logs + restart: unless-stopped networks: - dolphinscheduler dolphinscheduler-master: image: apache/dolphinscheduler:latest container_name: dolphinscheduler-master - command: ["master-server"] + command: master-server ports: - 5678:5678 environment: @@ -168,14 +153,15 @@ services: - dolphinscheduler-postgresql - dolphinscheduler-zookeeper volumes: - - ./dolphinscheduler-logs:/opt/dolphinscheduler/logs + - dolphinscheduler-logs:/opt/dolphinscheduler/logs + restart: unless-stopped networks: - dolphinscheduler dolphinscheduler-worker: image: apache/dolphinscheduler:latest container_name: dolphinscheduler-worker - command: ["worker-server"] + command: worker-server ports: - 1234:1234 - 50051:50051 @@ -188,30 +174,40 @@ services: WORKER_RESERVED_MEMORY: "0.1" WORKER_GROUP: "default" WORKER_WEIGHT: "100" - DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler + XLS_FILE_PATH: "/tmp/xls" + MAIL_SERVER_HOST: "" + MAIL_SERVER_PORT: "" + MAIL_SENDER: "" + MAIL_USER: "" + MAIL_PASSWD: "" + MAIL_SMTP_STARTTLS_ENABLE: "false" + MAIL_SMTP_SSL_ENABLE: "false" + MAIL_SMTP_SSL_TRUST: "" DATABASE_HOST: dolphinscheduler-postgresql DATABASE_PORT: 5432 DATABASE_USERNAME: root DATABASE_PASSWORD: root DATABASE_DATABASE: dolphinscheduler ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + RESOURCE_STORAGE_TYPE: HDFS + RESOURCE_UPLOAD_PATH: /dolphinscheduler + FS_DEFAULT_FS: file:/// healthcheck: test: ["CMD", "/root/checkpoint.sh", "WorkerServer"] interval: 30s timeout: 5s retries: 3 start_period: 30s - depends_on: + depends_on: - dolphinscheduler-postgresql - dolphinscheduler-zookeeper volumes: - - type: bind - source: ./dolphinscheduler_env.sh - target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh - - type: volume - source: dolphinscheduler-worker-data - target: /tmp/dolphinscheduler - - ./dolphinscheduler-logs:/opt/dolphinscheduler/logs + - ./dolphinscheduler_env.sh:/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh + - dolphinscheduler-worker-data:/tmp/dolphinscheduler + - dolphinscheduler-logs:/opt/dolphinscheduler/logs + - dolphinscheduler-resource-local:/dolphinscheduler + restart: unless-stopped networks: - dolphinscheduler @@ -224,7 +220,5 @@ volumes: dolphinscheduler-postgresql-initdb: dolphinscheduler-zookeeper: dolphinscheduler-worker-data: - -configs: - dolphinscheduler-worker-task-env: - file: ./dolphinscheduler_env.sh \ No newline at end of file + dolphinscheduler-logs: + 
dolphinscheduler-resource-local: \ No newline at end of file diff --git a/docker/docker-swarm/docker-stack.yml b/docker/docker-swarm/docker-stack.yml index dff4a47b2c..39c36347bb 100644 --- a/docker/docker-swarm/docker-stack.yml +++ b/docker/docker-swarm/docker-stack.yml @@ -20,13 +20,13 @@ services: dolphinscheduler-postgresql: image: bitnami/postgresql:latest + ports: + - 5432:5432 environment: TZ: Asia/Shanghai POSTGRESQL_USERNAME: root POSTGRESQL_PASSWORD: root POSTGRESQL_DATABASE: dolphinscheduler - ports: - - 5432:5432 volumes: - dolphinscheduler-postgresql:/bitnami/postgresql networks: @@ -37,12 +37,12 @@ services: dolphinscheduler-zookeeper: image: bitnami/zookeeper:latest + ports: + - 2181:2181 environment: TZ: Asia/Shanghai ALLOW_ANONYMOUS_LOGIN: "yes" ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons - ports: - - 2181:2181 volumes: - dolphinscheduler-zookeeper:/bitnami/zookeeper networks: @@ -53,7 +53,9 @@ services: dolphinscheduler-api: image: apache/dolphinscheduler:latest - command: ["api-server"] + command: api-server + ports: + - 12345:12345 environment: TZ: Asia/Shanghai DATABASE_HOST: dolphinscheduler-postgresql @@ -62,39 +64,17 @@ services: DATABASE_PASSWORD: root DATABASE_DATABASE: dolphinscheduler ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 - ports: - - 12345:12345 + RESOURCE_STORAGE_TYPE: HDFS + RESOURCE_UPLOAD_PATH: /dolphinscheduler + FS_DEFAULT_FS: file:/// healthcheck: test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"] - interval: 30 + interval: 30s timeout: 5s retries: 3 start_period: 30s volumes: - dolphinscheduler-logs:/opt/dolphinscheduler/logs - networks: - - dolphinscheduler - deploy: - mode: replicated - replicas: 1 - - dolphinscheduler-frontend: - image: apache/dolphinscheduler:latest - command: ["frontend"] - ports: - - 8888:8888 - environment: - TZ: Asia/Shanghai - FRONTEND_API_SERVER_HOST: dolphinscheduler-api - FRONTEND_API_SERVER_PORT: 12345 - healthcheck: - test: ["CMD", "nc", "-z", "localhost", "8888"] - interval: 30 - timeout: 5s - retries: 3 - start_period: 30s - volumes: - - dolphinscheduler-logs:/var/log/nginx networks: - dolphinscheduler deploy: @@ -103,7 +83,7 @@ services: dolphinscheduler-alert: image: apache/dolphinscheduler:latest - command: ["alert-server"] + command: alert-server environment: TZ: Asia/Shanghai XLS_FILE_PATH: "/tmp/xls" @@ -127,13 +107,13 @@ services: DATABASE_DATABASE: dolphinscheduler healthcheck: test: ["CMD", "/root/checkpoint.sh", "AlertServer"] - interval: 30 + interval: 30s timeout: 5s retries: 3 start_period: 30s volumes: - dolphinscheduler-logs:/opt/dolphinscheduler/logs - networks: + networks: - dolphinscheduler deploy: mode: replicated @@ -141,10 +121,10 @@ services: dolphinscheduler-master: image: apache/dolphinscheduler:latest - command: ["master-server"] - ports: + command: master-server + ports: - 5678:5678 - environment: + environment: TZ: Asia/Shanghai MASTER_EXEC_THREADS: "100" MASTER_EXEC_TASK_NUM: "20" @@ -161,7 +141,7 @@ services: ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 healthcheck: test: ["CMD", "/root/checkpoint.sh", "MasterServer"] - interval: 30 + interval: 30s timeout: 5s retries: 3 start_period: 30s @@ -175,11 +155,11 @@ services: dolphinscheduler-worker: image: apache/dolphinscheduler:latest - command: ["worker-server"] - ports: + command: worker-server + ports: - 1234:1234 - 50051:50051 - environment: + environment: TZ: Asia/Shanghai WORKER_EXEC_THREADS: "100" WORKER_HEARTBEAT_INTERVAL: "10" @@ -188,25 +168,37 @@ services: WORKER_RESERVED_MEMORY: "0.1" WORKER_GROUP: 
"default" WORKER_WEIGHT: "100" - DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler + XLS_FILE_PATH: "/tmp/xls" + MAIL_SERVER_HOST: "" + MAIL_SERVER_PORT: "" + MAIL_SENDER: "" + MAIL_USER: "" + MAIL_PASSWD: "" + MAIL_SMTP_STARTTLS_ENABLE: "false" + MAIL_SMTP_SSL_ENABLE: "false" + MAIL_SMTP_SSL_TRUST: "" DATABASE_HOST: dolphinscheduler-postgresql DATABASE_PORT: 5432 DATABASE_USERNAME: root DATABASE_PASSWORD: root DATABASE_DATABASE: dolphinscheduler ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + RESOURCE_STORAGE_TYPE: HDFS + RESOURCE_UPLOAD_PATH: /dolphinscheduler + FS_DEFAULT_FS: file:/// healthcheck: test: ["CMD", "/root/checkpoint.sh", "WorkerServer"] - interval: 30 + interval: 30s timeout: 5s retries: 3 start_period: 30s - volumes: - - dolphinscheduler-worker-data:/tmp/dolphinscheduler - - dolphinscheduler-logs:/opt/dolphinscheduler/logs configs: - source: dolphinscheduler-worker-task-env target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh + volumes: + - dolphinscheduler-worker-data:/tmp/dolphinscheduler + - dolphinscheduler-logs:/opt/dolphinscheduler/logs networks: - dolphinscheduler deploy: diff --git a/docker/docker-swarm/dolphinscheduler_env.sh b/docker/docker-swarm/dolphinscheduler_env.sh old mode 100644 new mode 100755 index 654318cb41..7fd39335ae --- a/docker/docker-swarm/dolphinscheduler_env.sh +++ b/docker/docker-swarm/dolphinscheduler_env.sh @@ -15,6 +15,14 @@ # limitations under the License. # -export PYTHON_HOME=/usr/bin/python2 +export HADOOP_HOME=/opt/soft/hadoop +export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop +export SPARK_HOME1=/opt/soft/spark1 +export SPARK_HOME2=/opt/soft/spark2 +export PYTHON_HOME=/usr/bin/python export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk -export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH \ No newline at end of file +export HIVE_HOME=/opt/soft/hive +export FLINK_HOME=/opt/soft/flink +export DATAX_HOME=/opt/soft/datax/bin/datax.py + +export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH diff --git a/docker/kubernetes/dolphinscheduler/Chart.yaml b/docker/kubernetes/dolphinscheduler/Chart.yaml index 9d640869dd..1e0ca755b9 100644 --- a/docker/kubernetes/dolphinscheduler/Chart.yaml +++ b/docker/kubernetes/dolphinscheduler/Chart.yaml @@ -22,7 +22,7 @@ home: https://dolphinscheduler.apache.org icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg keywords: - dolphinscheduler -- Scheduler +- scheduler # A chart can be either an 'application' or a 'library' chart. # # Application charts are a collection of templates that can be packaged into versioned archives @@ -35,18 +35,18 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 1.0.0 +version: 1.2.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 1.3.0 +appVersion: 1.4.0 dependencies: - name: postgresql - version: 8.x.x + version: 10.x.x repository: https://charts.bitnami.com/bitnami condition: postgresql.enabled - name: zookeeper - version: 5.x.x + version: 6.x.x repository: https://charts.bitnami.com/bitnami condition: zookeeper.enabled diff --git a/docker/kubernetes/dolphinscheduler/README.md b/docker/kubernetes/dolphinscheduler/README.md index 0691b1ed2e..318c3a9132 100644 --- a/docker/kubernetes/dolphinscheduler/README.md +++ b/docker/kubernetes/dolphinscheduler/README.md @@ -7,19 +7,20 @@ This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) ## Prerequisites -- Kubernetes 1.10+ +- Helm 3.1.0+ +- Kubernetes 1.12+ - PV provisioner support in the underlying infrastructure ## Installing the Chart -To install the chart with the release name `my-release`: +To install the chart with the release name `dolphinscheduler`: ```bash $ git clone https://github.com/apache/incubator-dolphinscheduler.git -$ cd incubator-dolphinscheduler/kubernetes/dolphinscheduler +$ cd incubator-dolphinscheduler/docker/kubernetes/dolphinscheduler $ helm repo add bitnami https://charts.bitnami.com/bitnami $ helm dependency update . -$ helm install --name dolphinscheduler . +$ helm install dolphinscheduler . ``` These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. @@ -30,7 +31,7 @@ These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default To uninstall/delete the `dolphinscheduler` deployment: ```bash -$ helm delete --purge dolphinscheduler +$ helm uninstall dolphinscheduler ``` The command removes all the Kubernetes components associated with the chart and deletes the release. @@ -220,32 +221,6 @@ The following tables lists the configurable parameters of the Dolphins Scheduler | `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | | `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | | | | | -| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` | -| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` | -| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` | -| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` | -| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `frontend.tolerations` | If specified, the pod's tolerations | `{}` | -| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `frontend.resources` | The `resource` limit and request config for frontend server. | `{}` | -| `frontend.annotations` | The `annotations` for frontend server. 
| `{}` | -| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `frontend.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `frontend.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `frontend.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `frontend.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` | -| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | | `ingress.enabled` | Enable ingress | `false` | | `ingress.host` | Ingress host | `dolphinscheduler.org` | | `ingress.path` | Ingress path | `/` | diff --git a/docker/kubernetes/dolphinscheduler/requirements.yaml b/docker/kubernetes/dolphinscheduler/requirements.yaml deleted file mode 100644 index e219975995..0000000000 --- a/docker/kubernetes/dolphinscheduler/requirements.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -dependencies: -- name: postgresql - version: 8.x.x - repository: https://charts.bitnami.com/bitnami - condition: postgresql.enabled -- name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD b/docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD deleted file mode 100644 index e219975995..0000000000 --- a/docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD +++ /dev/null @@ -1,25 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -dependencies: -- name: postgresql - version: 8.x.x - repository: https://charts.bitnami.com/bitnami - condition: postgresql.enabled -- name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD_0 b/docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD_0 deleted file mode 100644 index e219975995..0000000000 --- a/docker/kubernetes/dolphinscheduler/requirements.yaml~HEAD_0 +++ /dev/null @@ -1,25 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -dependencies: -- name: postgresql - version: 8.x.x - repository: https://charts.bitnami.com/bitnami - condition: postgresql.enabled -- name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/requirements.yaml~dev b/docker/kubernetes/dolphinscheduler/requirements.yaml~dev deleted file mode 100644 index e219975995..0000000000 --- a/docker/kubernetes/dolphinscheduler/requirements.yaml~dev +++ /dev/null @@ -1,25 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
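With the chart dependencies now declared directly in Chart.yaml (postgresql 10.x.x and zookeeper 6.x.x from the Bitnami repository), the requirements.yaml copies deleted here, including the ~HEAD/~dev merge leftovers, are no longer needed. A minimal sketch of the Helm 3 flow the updated README describes, using its release name `dolphinscheduler`:

```bash
# Clone the chart and resolve dependencies from Chart.yaml (requirements.yaml is gone)
git clone https://github.com/apache/incubator-dolphinscheduler.git
cd incubator-dolphinscheduler/docker/kubernetes/dolphinscheduler
helm repo add bitnami https://charts.bitnami.com/bitnami
helm dependency update .

# Helm 3: the release name is a positional argument, --name is no longer accepted
helm install dolphinscheduler .

# Tear down the release when finished (replaces the Helm 2 style `helm delete --purge`)
helm uninstall dolphinscheduler
```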
-# -dependencies: -- name: postgresql - version: 8.x.x - repository: https://charts.bitnami.com/bitnami - condition: postgresql.enabled -- name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/requirements.yaml~dev_0 b/docker/kubernetes/dolphinscheduler/requirements.yaml~dev_0 deleted file mode 100644 index e219975995..0000000000 --- a/docker/kubernetes/dolphinscheduler/requirements.yaml~dev_0 +++ /dev/null @@ -1,25 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -dependencies: -- name: postgresql - version: 8.x.x - repository: https://charts.bitnami.com/bitnami - condition: postgresql.enabled -- name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/NOTES.txt b/docker/kubernetes/dolphinscheduler/templates/NOTES.txt index 256c53ca57..8afe766d27 100644 --- a/docker/kubernetes/dolphinscheduler/templates/NOTES.txt +++ b/docker/kubernetes/dolphinscheduler/templates/NOTES.txt @@ -15,9 +15,9 @@ # limitations under the License. # -** Please be patient while the chart is being deployed ** +** Please be patient while the chart Dolphinscheduler {{ .Chart.AppVersion }} is being deployed ** -1. Get the Dolphinscheduler URL by running: +Get the Dolphinscheduler URL by running: {{- if .Values.ingress.enabled }} @@ -26,6 +26,6 @@ {{- else }} - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888 + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-api 12345:12345 {{- end }} diff --git a/docker/kubernetes/dolphinscheduler/templates/_helpers.tpl b/docker/kubernetes/dolphinscheduler/templates/_helpers.tpl index 1f121dfea8..ccfc746645 100644 --- a/docker/kubernetes/dolphinscheduler/templates/_helpers.tpl +++ b/docker/kubernetes/dolphinscheduler/templates/_helpers.tpl @@ -135,7 +135,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this Create a default dolphinscheduler worker base dir. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
*/}} -{{- define "dolphinscheduler.worker.base.dir" -}} -{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}} +{{- define "dolphinscheduler.data.basedir.path" -}} +{{- $name := default "/tmp/dolphinscheduler" .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}} {{- printf "%s" $name | trunc 63 | trimSuffix "/" -}} {{- end -}} \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml index 2c5b76c3f0..2c7dd67c57 100644 --- a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml @@ -24,6 +24,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} data: + DOLPHINSCHEDULER_OPTS: {{ .Values.alert.configmap.DOLPHINSCHEDULER_OPTS | quote }} ALERT_PLUGIN_DIR: {{ .Values.alert.configmap.ALERT_PLUGIN_DIR | quote }} XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }} MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }} diff --git a/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-api.yaml similarity index 69% rename from docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml rename to docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-api.yaml index 60d0d6e7b5..3845f1f67c 100644 --- a/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-api.yaml @@ -14,22 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. # +{{- if .Values.api.configmap }} apiVersion: v1 -kind: Service +kind: ConfigMap metadata: - name: {{ include "dolphinscheduler.fullname" . }}-frontend + name: {{ include "dolphinscheduler.fullname" . }}-api labels: - app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend + app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - ports: - - port: 8888 - targetPort: tcp-port - protocol: TCP - name: tcp-port - selector: - app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . 
}}-frontend - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/component: frontend \ No newline at end of file +data: + DOLPHINSCHEDULER_OPTS: {{ .Values.api.configmap.DOLPHINSCHEDULER_OPTS | quote }} +{{- end }} \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml index 931f6d48e4..1fcb5b5992 100644 --- a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-common.yaml @@ -24,12 +24,14 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} data: - DOLPHINSCHEDULER_ENV_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_ENV_PATH | quote }} - DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH | quote }} + DOLPHINSCHEDULER_ENV: |- + {{- range .Values.common.configmap.DOLPHINSCHEDULER_ENV }} + {{ . }} + {{- end }} + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.data.basedir.path" . | quote }} RESOURCE_STORAGE_TYPE: {{ .Values.common.configmap.RESOURCE_STORAGE_TYPE | quote }} RESOURCE_UPLOAD_PATH: {{ .Values.common.configmap.RESOURCE_UPLOAD_PATH | quote }} FS_DEFAULT_FS: {{ .Values.common.configmap.FS_DEFAULT_FS | quote }} FS_S3A_ENDPOINT: {{ .Values.common.configmap.FS_S3A_ENDPOINT | quote }} FS_S3A_ACCESS_KEY: {{ .Values.common.configmap.FS_S3A_ACCESS_KEY | quote }} - FS_S3A_SECRET_KEY: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | quote }} {{- end }} \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml index da82d639cb..7c1be7717e 100644 --- a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml @@ -24,6 +24,7 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} data: + DOLPHINSCHEDULER_OPTS: {{ .Values.master.configmap.DOLPHINSCHEDULER_OPTS | quote }} MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }} MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }} MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }} @@ -32,5 +33,4 @@ data: MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }} MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }} MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }} - DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . 
| quote }} {{- end }} \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml index 569341c225..78b6ed1036 100644 --- a/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml @@ -24,17 +24,12 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} data: + DOLPHINSCHEDULER_OPTS: {{ .Values.worker.configmap.DOLPHINSCHEDULER_OPTS | quote }} WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }} WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }} - WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }} WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }} WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }} WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }} WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }} WORKER_WEIGHT: {{ .Values.worker.configmap.WORKER_WEIGHT | quote }} - DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }} - dolphinscheduler_env.sh: |- - {{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }} - {{ . }} - {{- end }} {{- end }} \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml index fb9a23786b..e4a2b21ffa 100644 --- a/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml @@ -57,35 +57,6 @@ spec: {{- if .Values.alert.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - initContainers: - - name: init-database - image: busybox:1.31.0 - command: - - /bin/sh - - -ec - - | - while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do - counter=$((counter+1)) - if [ $counter == 5 ]; then - echo "Error: Couldn't connect to database." - exit 1 - fi - echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter." - sleep 60 - done - env: - - name: DATABASE_HOST - {{- if .Values.postgresql.enabled }} - value: {{ template "dolphinscheduler.postgresql.fullname" . }} - {{- else }} - value: {{ .Values.externalDatabase.host | quote }} - {{- end }} - - name: DATABASE_PORT - {{- if .Values.postgresql.enabled }} - value: "5432" - {{- else }} - value: {{ .Values.externalDatabase.port | quote }} - {{- end }} {{- if .Values.image.pullSecrets }} imagePullSecrets: - name: {{ include "dolphinscheduler.image.pullSecrets" . }} @@ -93,14 +64,17 @@ spec: containers: - name: {{ include "dolphinscheduler.fullname" . }}-alert image: {{ include "dolphinscheduler.image.repository" . | quote }} + imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "alert-server" - imagePullPolicy: {{ .Values.image.pullPolicy }} env: - - name: DOLPHINSCHEDULER_OPTS - value: {{ default "" .Values.alert.jvmOptions }} - name: TZ value: {{ .Values.timezone }} + - name: DOLPHINSCHEDULER_OPTS + valueFrom: + configMapKeyRef: + key: DOLPHINSCHEDULER_OPTS + name: {{ include "dolphinscheduler.fullname" . 
}}-alert - name: ALERT_PLUGIN_DIR valueFrom: configMapKeyRef: @@ -228,36 +202,6 @@ spec: {{- else }} value: {{ .Values.externalDatabase.params | quote }} {{- end }} - - name: RESOURCE_STORAGE_TYPE - valueFrom: - configMapKeyRef: - key: RESOURCE_STORAGE_TYPE - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: RESOURCE_UPLOAD_PATH - valueFrom: - configMapKeyRef: - key: RESOURCE_UPLOAD_PATH - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_DEFAULT_FS - valueFrom: - configMapKeyRef: - key: FS_DEFAULT_FS - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_S3A_ENDPOINT - valueFrom: - configMapKeyRef: - key: FS_S3A_ENDPOINT - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_S3A_ACCESS_KEY - valueFrom: - configMapKeyRef: - key: FS_S3A_ACCESS_KEY - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_S3A_SECRET_KEY - valueFrom: - configMapKeyRef: - key: FS_S3A_SECRET_KEY - name: {{ include "dolphinscheduler.fullname" . }}-common {{- if .Values.alert.resources }} resources: limits: diff --git a/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml index 92c2c72398..563f9ebbf9 100644 --- a/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml @@ -57,35 +57,6 @@ spec: {{- if .Values.api.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - initContainers: - - name: init-database - image: busybox:1.31.0 - command: - - /bin/sh - - -ec - - | - while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do - counter=$((counter+1)) - if [ $counter == 5 ]; then - echo "Error: Couldn't connect to database." - exit 1 - fi - echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter." - sleep 60 - done - env: - - name: DATABASE_HOST - {{- if .Values.postgresql.enabled }} - value: {{ template "dolphinscheduler.postgresql.fullname" . }} - {{- else }} - value: {{ .Values.externalDatabase.host | quote }} - {{- end }} - - name: DATABASE_PORT - {{- if .Values.postgresql.enabled }} - value: "5432" - {{- else }} - value: {{ .Values.externalDatabase.port | quote }} - {{- end }} {{- if .Values.image.pullSecrets }} imagePullSecrets: - name: {{ include "dolphinscheduler.image.pullSecrets" . }} @@ -93,17 +64,20 @@ spec: containers: - name: {{ include "dolphinscheduler.fullname" . }}-api image: {{ include "dolphinscheduler.image.repository" . | quote }} + imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "api-server" ports: - containerPort: 12345 name: tcp-port - imagePullPolicy: {{ .Values.image.pullPolicy }} env: - - name: DOLPHINSCHEDULER_OPTS - value: {{ default "" .Values.api.jvmOptions }} - name: TZ value: {{ .Values.timezone }} + - name: DOLPHINSCHEDULER_OPTS + valueFrom: + configMapKeyRef: + key: DOLPHINSCHEDULER_OPTS + name: {{ include "dolphinscheduler.fullname" . }}-api - name: DATABASE_TYPE {{- if .Values.postgresql.enabled }} value: "postgresql" @@ -164,7 +138,7 @@ spec: {{- end }} - name: ZOOKEEPER_ROOT {{- if .Values.zookeeper.enabled }} - value: "/dolphinscheduler" + value: {{ .Values.zookeeper.zookeeperRoot }} {{- else }} value: {{ .Values.externalZookeeper.zookeeperRoot }} {{- end }} @@ -183,6 +157,7 @@ spec: configMapKeyRef: key: FS_DEFAULT_FS name: {{ include "dolphinscheduler.fullname" . 
}}-common + {{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }} - name: FS_S3A_ENDPOINT valueFrom: configMapKeyRef: @@ -195,9 +170,10 @@ spec: name: {{ include "dolphinscheduler.fullname" . }}-common - name: FS_S3A_SECRET_KEY valueFrom: - configMapKeyRef: - key: FS_S3A_SECRET_KEY - name: {{ include "dolphinscheduler.fullname" . }}-common + secretKeyRef: + key: fs-s3a-secret-key + name: {{ printf "%s-%s" .Release.Name "fs-s3a" }} + {{- end }} {{- if .Values.api.resources }} resources: limits: diff --git a/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml deleted file mode 100644 index 04b9408d9f..0000000000 --- a/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml +++ /dev/null @@ -1,119 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "dolphinscheduler.fullname" . }}-frontend - labels: - app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/component: frontend -spec: - replicas: {{ .Values.frontend.replicas }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/component: frontend - strategy: - type: {{ .Values.frontend.strategy.type | quote }} - rollingUpdate: - maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }} - maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }} - template: - metadata: - labels: - app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/component: frontend - {{- if .Values.alert.annotations }} - annotations: - {{- toYaml .Values.alert.annotations | nindent 8 }} - {{- end }} - spec: - {{- if .Values.frontend.affinity }} - affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }} - {{- end }} - {{- if .Values.frontend.nodeSelector }} - nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }} - {{- end }} - {{- if .Values.frontend.tolerations }} - tolerations: {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.image.pullSecrets }} - imagePullSecrets: - - name: {{ include "dolphinscheduler.image.pullSecrets" . }} - {{- end }} - containers: - - name: {{ include "dolphinscheduler.fullname" . 
}}-frontend - image: {{ include "dolphinscheduler.image.repository" . | quote }} - args: - - "frontend" - ports: - - containerPort: 8888 - name: tcp-port - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: TZ - value: {{ .Values.timezone }} - - name: FRONTEND_API_SERVER_HOST - value: '{{ include "dolphinscheduler.fullname" . }}-api' - - name: FRONTEND_API_SERVER_PORT - value: "12345" - {{- if .Values.frontend.resources }} - resources: - limits: - memory: {{ .Values.frontend.resources.limits.memory | quote }} - cpu: {{ .Values.frontend.resources.limits.cpu | quote }} - requests: - memory: {{ .Values.frontend.resources.requests.memory | quote }} - cpu: {{ .Values.frontend.resources.requests.cpu | quote }} - {{- end }} - {{- if .Values.frontend.livenessProbe.enabled }} - livenessProbe: - tcpSocket: - port: 8888 - initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }} - {{- end }} - {{- if .Values.frontend.readinessProbe.enabled }} - readinessProbe: - tcpSocket: - port: 8888 - initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }} - {{- end }} - volumeMounts: - - mountPath: "/var/log/nginx" - name: {{ include "dolphinscheduler.fullname" . }}-frontend - volumes: - - name: {{ include "dolphinscheduler.fullname" . }}-frontend - {{- if .Values.frontend.persistentVolumeClaim.enabled }} - persistentVolumeClaim: - claimName: {{ include "dolphinscheduler.fullname" . }}-frontend - {{- else }} - emptyDir: {} - {{- end }} \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/ingress.yaml b/docker/kubernetes/dolphinscheduler/templates/ingress.yaml index d0f923dcf1..6edc82b12a 100644 --- a/docker/kubernetes/dolphinscheduler/templates/ingress.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/ingress.yaml @@ -30,7 +30,7 @@ spec: paths: - path: {{ .Values.ingress.path }} backend: - serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend + serviceName: {{ include "dolphinscheduler.fullname" . }}-api servicePort: tcp-port {{- if .Values.ingress.tls.enabled }} tls: diff --git a/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml b/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml index 7f74cd94ae..fd5e88afb9 100644 --- a/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml @@ -25,7 +25,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} spec: accessModes: - {{- range .Values.alert.persistentVolumeClaim.accessModes }} + {{- range .Values.alert.persistentVolumeClaim.accessModes }} - {{ . 
| quote }} {{- end }} storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }} diff --git a/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml b/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml index c1074cc2b1..48292966ad 100644 --- a/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml @@ -25,7 +25,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} spec: accessModes: - {{- range .Values.api.persistentVolumeClaim.accessModes }} + {{- range .Values.api.persistentVolumeClaim.accessModes }} - {{ . | quote }} {{- end }} storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }} diff --git a/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml b/docker/kubernetes/dolphinscheduler/templates/secret-external-fs-s3a.yaml similarity index 66% rename from docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml rename to docker/kubernetes/dolphinscheduler/templates/secret-external-fs-s3a.yaml index ac9fe02a9e..78e7440392 100644 --- a/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/secret-external-fs-s3a.yaml @@ -14,22 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- if .Values.frontend.persistentVolumeClaim.enabled }} +{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }} apiVersion: v1 -kind: PersistentVolumeClaim +kind: Secret metadata: - name: {{ include "dolphinscheduler.fullname" . }}-frontend + name: {{ printf "%s-%s" .Release.Name "fs-s3a" }} labels: - app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend + app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-fs-s3a app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - accessModes: - {{- range .Values.frontend.persistentVolumeClaim.accessModes }} - - {{ . | quote }} - {{- end }} - storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }} - resources: - requests: - storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }} +type: Opaque +data: + fs-s3a-secret-key: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | b64enc | quote }} {{- end }} \ No newline at end of file diff --git a/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml index e9dc7919ca..85633a9f33 100644 --- a/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml @@ -54,59 +54,6 @@ spec: {{- if .Values.master.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - initContainers: - - name: init-zookeeper - image: busybox:1.31.0 - command: - - /bin/sh - - -ec - - | - echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do - while ! nc -z ${line%:*} ${line#*:}; do - counter=$((counter+1)) - if [ $counter == 5 ]; then - echo "Error: Couldn't connect to zookeeper." - exit 1 - fi - echo "Trying to connect to zookeeper at ${line}. Attempt $counter." 
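The S3 secret key now moves out of the `-common` ConfigMap into an Opaque Secret named `<release>-fs-s3a` (key `fs-s3a-secret-key`), rendered only when `common.configmap.RESOURCE_STORAGE_TYPE` is `S3`. A hedged sketch of exercising that path; the endpoint, bucket, and credential values below are placeholders, not chart defaults:

```bash
# Install with S3 resource storage; the Secret template above only renders when RESOURCE_STORAGE_TYPE is "S3"
helm install dolphinscheduler . \
  --set common.configmap.RESOURCE_STORAGE_TYPE=S3 \
  --set common.configmap.RESOURCE_UPLOAD_PATH=/dolphinscheduler \
  --set common.configmap.FS_DEFAULT_FS=s3a://my-bucket \
  --set common.configmap.FS_S3A_ENDPOINT=s3.example.amazonaws.com \
  --set common.configmap.FS_S3A_ACCESS_KEY=EXAMPLEACCESSKEY \
  --set common.configmap.FS_S3A_SECRET_KEY=EXAMPLESECRETKEY

# The key is no longer visible in the -common ConfigMap; it is base64-encoded in the new Secret
kubectl get secret dolphinscheduler-fs-s3a -o jsonpath='{.data.fs-s3a-secret-key}' | base64 -d
```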
- sleep 60 - done - done - env: - - name: ZOOKEEPER_QUORUM - {{- if .Values.zookeeper.enabled }} - value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}" - {{- else }} - value: {{ .Values.externalZookeeper.zookeeperQuorum }} - {{- end }} - - name: init-database - image: busybox:1.31.0 - command: - - /bin/sh - - -ec - - | - while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do - counter=$((counter+1)) - if [ $counter == 5 ]; then - echo "Error: Couldn't connect to database." - exit 1 - fi - echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter." - sleep 60 - done - env: - - name: DATABASE_HOST - {{- if .Values.postgresql.enabled }} - value: {{ template "dolphinscheduler.postgresql.fullname" . }} - {{- else }} - value: {{ .Values.externalDatabase.host | quote }} - {{- end }} - - name: DATABASE_PORT - {{- if .Values.postgresql.enabled }} - value: "5432" - {{- else }} - value: {{ .Values.externalDatabase.port | quote }} - {{- end }} {{- if .Values.image.pullSecrets }} imagePullSecrets: - name: {{ include "dolphinscheduler.image.pullSecrets" . }} @@ -114,17 +61,20 @@ spec: containers: - name: {{ include "dolphinscheduler.fullname" . }}-master image: {{ include "dolphinscheduler.image.repository" . | quote }} + imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "master-server" ports: - containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }} name: "master-port" - imagePullPolicy: {{ .Values.image.pullPolicy }} env: - - name: DOLPHINSCHEDULER_OPTS - value: {{ default "" .Values.master.jvmOptions }} - name: TZ value: {{ .Values.timezone }} + - name: DOLPHINSCHEDULER_OPTS + valueFrom: + configMapKeyRef: + key: DOLPHINSCHEDULER_OPTS + name: {{ include "dolphinscheduler.fullname" . }}-master - name: MASTER_EXEC_THREADS valueFrom: configMapKeyRef: @@ -168,7 +118,7 @@ spec: - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH valueFrom: configMapKeyRef: - name: {{ include "dolphinscheduler.fullname" . }}-master + name: {{ include "dolphinscheduler.fullname" . }}-common key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH - name: DATABASE_TYPE {{- if .Values.postgresql.enabled }} @@ -230,40 +180,10 @@ spec: {{- end }} - name: ZOOKEEPER_ROOT {{- if .Values.zookeeper.enabled }} - value: "/dolphinscheduler" + value: {{ .Values.zookeeper.zookeeperRoot }} {{- else }} value: {{ .Values.externalZookeeper.zookeeperRoot }} {{- end }} - - name: RESOURCE_STORAGE_TYPE - valueFrom: - configMapKeyRef: - key: RESOURCE_STORAGE_TYPE - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: RESOURCE_UPLOAD_PATH - valueFrom: - configMapKeyRef: - key: RESOURCE_UPLOAD_PATH - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_DEFAULT_FS - valueFrom: - configMapKeyRef: - key: FS_DEFAULT_FS - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_S3A_ENDPOINT - valueFrom: - configMapKeyRef: - key: FS_S3A_ENDPOINT - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_S3A_ACCESS_KEY - valueFrom: - configMapKeyRef: - key: FS_S3A_ACCESS_KEY - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: FS_S3A_SECRET_KEY - valueFrom: - configMapKeyRef: - key: FS_S3A_SECRET_KEY - name: {{ include "dolphinscheduler.fullname" . 
}}-common {{- if .Values.master.resources }} resources: limits: diff --git a/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml index ae562cc62b..b8a62fa6ae 100644 --- a/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml @@ -54,59 +54,6 @@ spec: {{- if .Values.worker.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} - initContainers: - - name: init-zookeeper - image: busybox:1.31.0 - command: - - /bin/sh - - -ec - - | - echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do - while ! nc -z ${line%:*} ${line#*:}; do - counter=$((counter+1)) - if [ $counter == 5 ]; then - echo "Error: Couldn't connect to zookeeper." - exit 1 - fi - echo "Trying to connect to zookeeper at ${line}. Attempt $counter." - sleep 60 - done - done - env: - - name: ZOOKEEPER_QUORUM - {{- if .Values.zookeeper.enabled }} - value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}" - {{- else }} - value: {{ .Values.externalZookeeper.zookeeperQuorum }} - {{- end }} - - name: init-database - image: busybox:1.31.0 - command: - - /bin/sh - - -ec - - | - while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do - counter=$((counter+1)) - if [ $counter == 5 ]; then - echo "Error: Couldn't connect to database." - exit 1 - fi - echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter." - sleep 60 - done - env: - - name: DATABASE_HOST - {{- if .Values.postgresql.enabled }} - value: {{ template "dolphinscheduler.postgresql.fullname" . }} - {{- else }} - value: {{ .Values.externalDatabase.host | quote }} - {{- end }} - - name: DATABASE_PORT - {{- if .Values.postgresql.enabled }} - value: "5432" - {{- else }} - value: {{ .Values.externalDatabase.port | quote }} - {{- end }} {{- if .Values.image.pullSecrets }} imagePullSecrets: - name: {{ include "dolphinscheduler.image.pullSecrets" . }} @@ -114,29 +61,27 @@ spec: containers: - name: {{ include "dolphinscheduler.fullname" . }}-worker image: {{ include "dolphinscheduler.image.repository" . | quote }} + imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "worker-server" ports: - containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }} name: "worker-port" - containerPort: 50051 - name: "logs-port" - imagePullPolicy: {{ .Values.image.pullPolicy }} + name: "logger-port" env: - - name: DOLPHINSCHEDULER_OPTS - value: {{ default "" .Values.worker.jvmOptions }} - name: TZ value: {{ .Values.timezone }} - - name: WORKER_EXEC_THREADS + - name: DOLPHINSCHEDULER_OPTS valueFrom: configMapKeyRef: + key: DOLPHINSCHEDULER_OPTS name: {{ include "dolphinscheduler.fullname" . }}-worker - key: WORKER_EXEC_THREADS - - name: WORKER_FETCH_TASK_NUM + - name: WORKER_EXEC_THREADS valueFrom: configMapKeyRef: name: {{ include "dolphinscheduler.fullname" . }}-worker - key: WORKER_FETCH_TASK_NUM + key: WORKER_EXEC_THREADS - name: WORKER_HEARTBEAT_INTERVAL valueFrom: configMapKeyRef: @@ -170,8 +115,58 @@ spec: - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH valueFrom: configMapKeyRef: - name: {{ include "dolphinscheduler.fullname" . }}-master + name: {{ include "dolphinscheduler.fullname" . 
}}-common key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH + - name: ALERT_PLUGIN_DIR + valueFrom: + configMapKeyRef: + key: ALERT_PLUGIN_DIR + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: XLS_FILE_PATH + valueFrom: + configMapKeyRef: + key: XLS_FILE_PATH + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_SERVER_HOST + valueFrom: + configMapKeyRef: + key: MAIL_SERVER_HOST + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_SERVER_PORT + valueFrom: + configMapKeyRef: + key: MAIL_SERVER_PORT + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_SENDER + valueFrom: + configMapKeyRef: + key: MAIL_SENDER + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_USER + valueFrom: + configMapKeyRef: + key: MAIL_USER + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_PASSWD + valueFrom: + configMapKeyRef: + key: MAIL_PASSWD + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_SMTP_STARTTLS_ENABLE + valueFrom: + configMapKeyRef: + key: MAIL_SMTP_STARTTLS_ENABLE + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_SMTP_SSL_ENABLE + valueFrom: + configMapKeyRef: + key: MAIL_SMTP_SSL_ENABLE + name: {{ include "dolphinscheduler.fullname" . }}-alert + - name: MAIL_SMTP_SSL_TRUST + valueFrom: + configMapKeyRef: + key: MAIL_SMTP_SSL_TRUST + name: {{ include "dolphinscheduler.fullname" . }}-alert - name: DATABASE_TYPE {{- if .Values.postgresql.enabled }} value: "postgresql" @@ -232,7 +227,7 @@ spec: {{- end }} - name: ZOOKEEPER_ROOT {{- if .Values.zookeeper.enabled }} - value: "/dolphinscheduler" + value: {{ .Values.zookeeper.zookeeperRoot }} {{- else }} value: {{ .Values.externalZookeeper.zookeeperRoot }} {{- end }} @@ -251,6 +246,7 @@ spec: configMapKeyRef: key: FS_DEFAULT_FS name: {{ include "dolphinscheduler.fullname" . }}-common + {{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }} - name: FS_S3A_ENDPOINT valueFrom: configMapKeyRef: @@ -263,54 +259,10 @@ spec: name: {{ include "dolphinscheduler.fullname" . }}-common - name: FS_S3A_SECRET_KEY valueFrom: - configMapKeyRef: - key: FS_S3A_SECRET_KEY - name: {{ include "dolphinscheduler.fullname" . }}-common - - name: XLS_FILE_PATH - valueFrom: - configMapKeyRef: - key: XLS_FILE_PATH - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_SERVER_HOST - valueFrom: - configMapKeyRef: - key: MAIL_SERVER_HOST - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_SERVER_PORT - valueFrom: - configMapKeyRef: - key: MAIL_SERVER_PORT - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_SENDER - valueFrom: - configMapKeyRef: - key: MAIL_SENDER - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_USER - valueFrom: - configMapKeyRef: - key: MAIL_USER - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_PASSWD - valueFrom: - configMapKeyRef: - key: MAIL_PASSWD - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_SMTP_STARTTLS_ENABLE - valueFrom: - configMapKeyRef: - key: MAIL_SMTP_STARTTLS_ENABLE - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_SMTP_SSL_ENABLE - valueFrom: - configMapKeyRef: - key: MAIL_SMTP_SSL_ENABLE - name: {{ include "dolphinscheduler.fullname" . }}-alert - - name: MAIL_SMTP_SSL_TRUST - valueFrom: - configMapKeyRef: - key: MAIL_SMTP_SSL_TRUST - name: {{ include "dolphinscheduler.fullname" . 
}}-alert + secretKeyRef: + key: fs-s3a-secret-key + name: {{ printf "%s-%s" .Release.Name "fs-s3a" }} + {{- end }} - name: ENTERPRISE_WECHAT_ENABLE valueFrom: configMapKeyRef: @@ -372,13 +324,13 @@ spec: failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }} {{- end }} volumeMounts: - - mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }} + - mountPath: {{ include "dolphinscheduler.data.basedir.path" . | quote }} name: {{ include "dolphinscheduler.fullname" . }}-worker-data - mountPath: "/opt/dolphinscheduler/logs" name: {{ include "dolphinscheduler.fullname" . }}-worker-logs - mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh" subPath: "dolphinscheduler_env.sh" - name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap + name: {{ include "dolphinscheduler.fullname" . }}-common-env volumes: - name: {{ include "dolphinscheduler.fullname" . }}-worker-data {{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }} @@ -394,12 +346,12 @@ spec: {{- else }} emptyDir: {} {{- end }} - - name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap + - name: {{ include "dolphinscheduler.fullname" . }}-common-env configMap: defaultMode: 0777 - name: {{ include "dolphinscheduler.fullname" . }}-worker + name: {{ include "dolphinscheduler.fullname" . }}-common items: - - key: dolphinscheduler_env.sh + - key: DOLPHINSCHEDULER_ENV path: dolphinscheduler_env.sh {{- if .Values.worker.persistentVolumeClaim.enabled }} volumeClaimTemplates: diff --git a/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml index fb3b85b5c3..5c4e75436f 100644 --- a/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml @@ -30,9 +30,9 @@ spec: protocol: TCP name: worker-port - port: 50051 - targetPort: logs-port + targetPort: logger-port protocol: TCP - name: logs-port + name: logger-port selector: app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/docker/kubernetes/dolphinscheduler/values.yaml b/docker/kubernetes/dolphinscheduler/values.yaml index 3261b08401..61201dfaa7 100644 --- a/docker/kubernetes/dolphinscheduler/values.yaml +++ b/docker/kubernetes/dolphinscheduler/values.yaml @@ -58,62 +58,74 @@ externalDatabase: # If not exists external zookeeper, by default, Dolphinscheduler's zookeeper will use it. zookeeper: enabled: true - taskQueue: "zookeeper" - config: null + fourlwCommandsWhitelist: srvr,ruok,wchs,cons service: port: "2181" persistence: enabled: false size: "20Gi" storageClass: "-" + zookeeperRoot: "/dolphinscheduler" # If exists external zookeeper, and set zookeeper.enable value to false. -# If zookeeper.enable is false, Dolphinscheduler's zookeeper will use it. +# If zookeeper.enable is false, Dolphinscheduler's zookeeper will use it. 
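With the worker now mounting `dolphinscheduler_env.sh` from the shared `-common` ConfigMap (key `DOLPHINSCHEDULER_ENV`), the log port renamed to `logger-port`, and the frontend removed in favor of the api service, a quick post-install sanity check might look like the following. The pod and service names are assumptions based on the default fullname template and the release name `dolphinscheduler`:

```bash
# The env file is projected from the -common ConfigMap into the worker container
kubectl exec dolphinscheduler-worker-0 -- \
  cat /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh

# The headless worker service should now expose the 50051 port as "logger-port"
kubectl get svc dolphinscheduler-worker-headless -o jsonpath='{.spec.ports[*].name}'

# NOTES.txt now points at the api service (port 12345) instead of the removed frontend
kubectl port-forward svc/dolphinscheduler-api 12345:12345
```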
externalZookeeper: - taskQueue: "zookeeper" zookeeperQuorum: "127.0.0.1:2181" zookeeperRoot: "/dolphinscheduler" common: + ## ConfigMap configmap: - DOLPHINSCHEDULER_ENV_PATH: "/tmp/dolphinscheduler/env" - DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler/files" - RESOURCE_STORAGE_TYPE: "NONE" - RESOURCE_UPLOAD_PATH: "/ds" - FS_DEFAULT_FS: "s3a://xxxx" + DOLPHINSCHEDULER_ENV: + - "export HADOOP_HOME=/opt/soft/hadoop" + - "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop" + - "export SPARK_HOME1=/opt/soft/spark1" + - "export SPARK_HOME2=/opt/soft/spark2" + - "export PYTHON_HOME=/usr/bin/python" + - "export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk" + - "export HIVE_HOME=/opt/soft/hive" + - "export FLINK_HOME=/opt/soft/flink" + - "export DATAX_HOME=/opt/soft/datax/bin/datax.py" + - "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH" + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" + RESOURCE_STORAGE_TYPE: "HDFS" + RESOURCE_UPLOAD_PATH: "/dolphinscheduler" + FS_DEFAULT_FS: "file:///" FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com" FS_S3A_ACCESS_KEY: "xxxxxxx" FS_S3A_SECRET_KEY: "xxxxxxx" master: + ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. podManagementPolicy: "Parallel" + ## Replicas is the desired number of replicas of the given Template. replicas: "3" - # NodeSelector is a selector which must be true for the pod to fit on a node. - # Selector which must match a node's labels for the pod to be scheduled on that node. - # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## You can use annotations to attach arbitrary non-identifying metadata to objects. + ## Clients such as tools and libraries can retrieve this metadata. + annotations: {} + ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints. + ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core + affinity: {} + ## NodeSelector is a selector which must be true for the pod to fit on a node. + ## Selector which must match a node's labels for the pod to be scheduled on that node. + ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ nodeSelector: {} - # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, - # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. + ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, + ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. tolerations: [] - # Affinity is a group of affinity scheduling rules. - # If specified, the pod's scheduling constraints. - # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core - affinity: {} - # The jvm options for java instance startup - jvmOptions: "" + ## Compute Resources required by this container. Cannot be updated. + ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container resources: {} - # limits: - # memory: "18Gi" - # cpu: "4" - # requests: - # memory: "2Gi" - # cpu: "500m" - # You can use annotations to attach arbitrary non-identifying metadata to objects. - # Clients such as tools and libraries can retrieve this metadata. - annotations: {} - ## Periodic probe of container liveness. 
Container will be restarted if the probe fails. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + # resources: + # limits: + # memory: "8Gi" + # cpu: "4" + # requests: + # memory: "2Gi" + # cpu: "500m" + ## ConfigMap configmap: + DOLPHINSCHEDULER_OPTS: "" MASTER_EXEC_THREADS: "100" MASTER_EXEC_TASK_NUM: "20" MASTER_HEARTBEAT_INTERVAL: "10" @@ -122,6 +134,8 @@ master: MASTER_MAX_CPULOAD_AVG: "100" MASTER_RESERVED_MEMORY: "0.1" MASTER_LISTEN_PORT: "5678" + ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. + ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes livenessProbe: enabled: true initialDelaySeconds: "30" @@ -138,7 +152,7 @@ master: timeoutSeconds: "5" failureThreshold: "3" successThreshold: "1" - ## volumeClaimTemplates is a list of claims that pods are allowed to reference. + ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. ## A claim in this list takes precedence over any volumes in the template, with the same name. @@ -150,31 +164,43 @@ master: storage: "20Gi" worker: + ## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. podManagementPolicy: "Parallel" + ## Replicas is the desired number of replicas of the given Template. replicas: "3" - # NodeSelector is a selector which must be true for the pod to fit on a node. - # Selector which must match a node's labels for the pod to be scheduled on that node. - # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## You can use annotations to attach arbitrary non-identifying metadata to objects. + ## Clients such as tools and libraries can retrieve this metadata. + annotations: {} + ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints. + ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core + affinity: {} + ## NodeSelector is a selector which must be true for the pod to fit on a node. + ## Selector which must match a node's labels for the pod to be scheduled on that node. + ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ nodeSelector: {} - # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, - # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. + ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, + ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. tolerations: [] - # Affinity is a group of affinity scheduling rules. - # If specified, the pod's scheduling constraints. - # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core - affinity: {} - # The jvm options for java instance startup - jvmOptions: "" + ## Compute Resources required by this container. Cannot be updated. 
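The per-service `jvmOptions` value is gone; JVM flags now ride in each service's ConfigMap as `DOLPHINSCHEDULER_OPTS`, next to the existing MASTER_* tunables. A sketch of overriding them at upgrade time (the flag values shown are illustrative, not chart defaults):

```bash
# JVM options and master tuning now live under master.configmap.*
helm upgrade --install dolphinscheduler . \
  --set master.replicas=3 \
  --set master.configmap.DOLPHINSCHEDULER_OPTS="-Xms1g -Xmx1g" \
  --set master.configmap.MASTER_EXEC_THREADS=100 \
  --set master.configmap.MASTER_LISTEN_PORT=5678
```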
+ ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container resources: {} - # limits: - # memory: "18Gi" - # cpu: "4" - # requests: - # memory: "2Gi" - # cpu: "500m" - # You can use annotations to attach arbitrary non-identifying metadata to objects. - # Clients such as tools and libraries can retrieve this metadata. - annotations: {} + # resources: + # limits: + # memory: "8Gi" + # cpu: "4" + # requests: + # memory: "2Gi" + # cpu: "500m" + ## ConfigMap + configmap: + DOLPHINSCHEDULER_OPTS: "" + WORKER_EXEC_THREADS: "100" + WORKER_HEARTBEAT_INTERVAL: "10" + WORKER_MAX_CPULOAD_AVG: "100" + WORKER_RESERVED_MEMORY: "0.1" + WORKER_LISTEN_PORT: "1234" + WORKER_GROUP: "default" + WORKER_WEIGHT: "100" ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes livenessProbe: @@ -193,27 +219,7 @@ worker: timeoutSeconds: "5" failureThreshold: "3" successThreshold: "1" - configmap: - WORKER_EXEC_THREADS: "100" - WORKER_HEARTBEAT_INTERVAL: "10" - WORKER_FETCH_TASK_NUM: "3" - WORKER_MAX_CPULOAD_AVG: "100" - WORKER_RESERVED_MEMORY: "0.1" - WORKER_LISTEN_PORT: "1234" - WORKER_GROUP: "default" - WORKER_WEIGHT: "100" - DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" - DOLPHINSCHEDULER_ENV: - - "export HADOOP_HOME=/opt/soft/hadoop" - - "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop" - - "export SPARK_HOME1=/opt/soft/spark1" - - "export SPARK_HOME2=/opt/soft/spark2" - - "export PYTHON_HOME=/opt/soft/python" - - "export JAVA_HOME=/opt/soft/java" - - "export HIVE_HOME=/opt/soft/hive" - - "export FLINK_HOME=/opt/soft/flink" - - "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH" - ## volumeClaimTemplates is a list of claims that pods are allowed to reference. + ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. ## A claim in this list takes precedence over any volumes in the template, with the same name. @@ -235,38 +241,40 @@ worker: storage: "20Gi" alert: + ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. + replicas: "1" + ## The deployment strategy to use to replace existing pods with new ones. strategy: type: "RollingUpdate" rollingUpdate: maxSurge: "25%" maxUnavailable: "25%" - replicas: "1" - # NodeSelector is a selector which must be true for the pod to fit on a node. - # Selector which must match a node's labels for the pod to be scheduled on that node. - # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## You can use annotations to attach arbitrary non-identifying metadata to objects. + ## Clients such as tools and libraries can retrieve this metadata. + annotations: {} + ## NodeSelector is a selector which must be true for the pod to fit on a node. + ## Selector which must match a node's labels for the pod to be scheduled on that node. + ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + affinity: {} + ## Compute Resources required by this container. Cannot be updated. 
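Because `WORKER_FETCH_TASK_NUM` and the per-worker `DOLPHINSCHEDULER_ENV` list are dropped here (the environment now comes from `common.configmap.DOLPHINSCHEDULER_ENV`), rendering the chart offline is a cheap way to confirm what the worker ConfigMap will actually contain. One possible check:

```bash
# Render only the worker ConfigMap and inspect its remaining keys
helm template dolphinscheduler . \
  --show-only templates/configmap-dolphinscheduler-worker.yaml

# Confirm the removed key no longer appears anywhere in the rendered manifests
helm template dolphinscheduler . | grep -c WORKER_FETCH_TASK_NUM || true
```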
+ ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container nodeSelector: {} - # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, - # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. + ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, + ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. tolerations: [] - # Affinity is a group of affinity scheduling rules. - # If specified, the pod's scheduling constraints. - # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core - affinity: {} - # The jvm options for java instance startup - jvmOptions: "" + ## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints. + ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core resources: {} - # limits: - # memory: "4Gi" - # cpu: "1" - # requests: - # memory: "2Gi" - # cpu: "500m" - # You can use annotations to attach arbitrary non-identifying metadata to objects. - # Clients such as tools and libraries can retrieve this metadata. - annotations: {} - ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + # resources: + # limits: + # memory: "2Gi" + # cpu: "1" + # requests: + # memory: "1Gi" + # cpu: "500m" + ## ConfigMap configmap: + DOLPHINSCHEDULER_OPTS: "" ALERT_PLUGIN_DIR: "/opt/dolphinscheduler/alert/plugin" XLS_FILE_PATH: "/tmp/xls" MAIL_SERVER_HOST: "" @@ -282,63 +290,6 @@ alert: ENTERPRISE_WECHAT_SECRET: "" ENTERPRISE_WECHAT_AGENT_ID: "" ENTERPRISE_WECHAT_USERS: "" - livenessProbe: - enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" - ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - readinessProbe: - enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" - ## volumeClaimTemplates is a list of claims that pods are allowed to reference. - ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. - ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. - ## A claim in this list takes precedence over any volumes in the template, with the same name. - persistentVolumeClaim: - enabled: false - accessModes: - - "ReadWriteOnce" - storageClassName: "-" - storage: "20Gi" - -api: - strategy: - type: "RollingUpdate" - rollingUpdate: - maxSurge: "25%" - maxUnavailable: "25%" - replicas: "1" - # NodeSelector is a selector which must be true for the pod to fit on a node. - # Selector which must match a node's labels for the pod to be scheduled on that node. - # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - nodeSelector: {} - # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, - # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. 
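The alert ConfigMap keeps the mail and Enterprise WeChat settings, and the worker StatefulSet above now reads the same MAIL_* keys from it, so they only need to be set once per release. A hedged example with placeholder SMTP values:

```bash
# Mail settings are consumed by both the alert server and the workers
helm upgrade --install dolphinscheduler . \
  --set alert.configmap.MAIL_SERVER_HOST=smtp.example.com \
  --set alert.configmap.MAIL_SERVER_PORT=25 \
  --set alert.configmap.MAIL_SENDER=dolphinscheduler@example.com \
  --set alert.configmap.MAIL_USER=dolphinscheduler@example.com \
  --set alert.configmap.MAIL_PASSWD=changeit \
  --set alert.configmap.MAIL_SMTP_STARTTLS_ENABLE=false
```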
- tolerations: [] - # Affinity is a group of affinity scheduling rules. - # If specified, the pod's scheduling constraints. - # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core - affinity: {} - # The jvm options for java instance startup - jvmOptions: "" - resources: {} - # limits: - # memory: "4Gi" - # cpu: "2" - # requests: - # memory: "2Gi" - # cpu: "500m" - # You can use annotations to attach arbitrary non-identifying metadata to objects. - # Clients such as tools and libraries can retrieve this metadata. - annotations: {} ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes livenessProbe: @@ -357,10 +308,8 @@ api: timeoutSeconds: "5" failureThreshold: "3" successThreshold: "1" - ## volumeClaimTemplates is a list of claims that pods are allowed to reference. - ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. - ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. - ## A claim in this list takes precedence over any volumes in the template, with the same name. + ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace. + ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims persistentVolumeClaim: enabled: false accessModes: @@ -368,34 +317,41 @@ api: storageClassName: "-" storage: "20Gi" -frontend: +api: + ## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. + replicas: "1" + ## The deployment strategy to use to replace existing pods with new ones. strategy: type: "RollingUpdate" rollingUpdate: maxSurge: "25%" maxUnavailable: "25%" - replicas: "1" - # NodeSelector is a selector which must be true for the pod to fit on a node. - # Selector which must match a node's labels for the pod to be scheduled on that node. - # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## You can use annotations to attach arbitrary non-identifying metadata to objects. + ## Clients such as tools and libraries can retrieve this metadata. + annotations: {} + ## NodeSelector is a selector which must be true for the pod to fit on a node. + ## Selector which must match a node's labels for the pod to be scheduled on that node. + ## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + affinity: {} + ## Compute Resources required by this container. Cannot be updated. + ## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container nodeSelector: {} - # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, - # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. + ## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, + ## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. tolerations: [] - # Affinity is a group of affinity scheduling rules. - # If specified, the pod's scheduling constraints. - # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core - affinity: {} + ## Affinity is a group of affinity scheduling rules. 
If specified, the pod's scheduling constraints. + ## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core resources: {} - # limits: - # memory: "256Mi" + # resources: + # limits: + # memory: "2Gi" # cpu: "1" # requests: - # memory: "256Mi" + # memory: "1Gi" # cpu: "500m" - # You can use annotations to attach arbitrary non-identifying metadata to objects. - # Clients such as tools and libraries can retrieve this metadata. - annotations: {} + ## ConfigMap + configmap: + DOLPHINSCHEDULER_OPTS: "" ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes livenessProbe: @@ -414,10 +370,8 @@ frontend: timeoutSeconds: "5" failureThreshold: "3" successThreshold: "1" - ## volumeClaimTemplates is a list of claims that pods are allowed to reference. - ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. - ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. - ## A claim in this list takes precedence over any volumes in the template, with the same name. + ## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace. + ## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims persistentVolumeClaim: enabled: false accessModes: diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkAlertChannel.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkAlertChannel.java index 7adfacce39..6b13f24d86 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkAlertChannel.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkAlertChannel.java @@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel; import org.apache.dolphinscheduler.spi.alert.AlertData; import org.apache.dolphinscheduler.spi.alert.AlertInfo; import org.apache.dolphinscheduler.spi.alert.AlertResult; -import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; import java.util.Map; @@ -34,8 +33,10 @@ public class DingTalkAlertChannel implements AlertChannel { public AlertResult process(AlertInfo alertInfo) { AlertData alertData = alertInfo.getAlertData(); - String alertParams = alertInfo.getAlertParams(); - Map paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams); + Map paramsMap = alertInfo.getAlertParams(); + if (null == paramsMap) { + return new AlertResult("false", "ding talk params is null"); + } return new DingTalkSender(paramsMap).sendDingTalkMsg(alertData.getTitle(), alertData.getContent()); } } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkParamsConstants.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkParamsConstants.java index c00d13076e..e94da80f20 100644 --- 
a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkParamsConstants.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkParamsConstants.java @@ -22,36 +22,24 @@ package org.apache.dolphinscheduler.plugin.alert.dingtalk; */ public class DingTalkParamsConstants { + + static final String DING_TALK_PROXY_ENABLE = "isEnableProxy"; + static final String DING_TALK_WEB_HOOK = "webhook"; + static final String NAME_DING_TALK_WEB_HOOK = "WebHook"; + static final String DING_TALK_KEYWORD = "keyword"; + static final String NAME_DING_TALK_KEYWORD = "Keyword"; + static final String NAME_DING_TALK_PROXY_ENABLE = "IsEnableProxy"; + static final String DING_TALK_PROXY = "proxy"; + static final String NAME_DING_TALK_PROXY = "Proxy"; + static final String DING_TALK_PORT = "port"; + static final String NAME_DING_TALK_PORT = "Port"; + static final String DING_TALK_USER = "user"; + static final String NAME_DING_TALK_USER = "User"; + static final String DING_TALK_PASSWORD = "password"; + static final String NAME_DING_TALK_PASSWORD = "Password"; + private DingTalkParamsConstants() { throw new IllegalStateException("Utility class"); } - static final String DING_TALK_WEB_HOOK = "dingtalk.webhook"; - - static final String NAME_DING_TALK_WEB_HOOK = "dingTalkWebHook"; - - static final String DING_TALK_KEYWORD = "dingtalk.keyword"; - - static final String NAME_DING_TALK_KEYWORD = "dingTalkKeyword"; - - public static final String DING_TALK_PROXY_ENABLE = "dingtalk.isEnableProxy"; - - static final String NAME_DING_TALK_PROXY_ENABLE = "dingTalkIsEnableProxy"; - - static final String DING_TALK_PROXY = "dingtalk.proxy"; - - static final String NAME_DING_TALK_PROXY = "dingTalkProxy"; - - static final String DING_TALK_PORT = "dingtalk.port"; - - static final String NAME_DING_TALK_PORT = "dingTalkPort"; - - static final String DING_TALK_USER = "dingtalk.user"; - - static final String NAME_DING_TALK_USER = "dingTalkUser"; - - static final String DING_TALK_PASSWORD = "dingtalk.password"; - - static final String NAME_DING_TALK_PASSWORD = "dingTalkPassword"; - } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSender.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSender.java index d152bc89cb..0d87e4779a 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSender.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/main/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSender.java @@ -75,51 +75,6 @@ public class DingTalkSender { } - public AlertResult sendDingTalkMsg(String msg, String charset) { - AlertResult alertResult; - try { - String resp = sendMsg(msg, charset); - return checkSendDingTalkSendMsgResult(resp); - } catch (Exception e) { - logger.info("send ding talk alert msg exception : {}", e.getMessage()); - alertResult = new AlertResult(); - alertResult.setStatus("false"); - alertResult.setMessage("send ding talk alert fail."); - } - return alertResult; - } - - private String sendMsg(String msg, String charset) throws IOException { - - String msgToJson = textToJsonString(msg + "#" + keyword); - HttpPost httpPost = 
constructHttpPost(url, msgToJson, charset); - - CloseableHttpClient httpClient; - if (Boolean.TRUE.equals(enableProxy)) { - httpClient = getProxyClient(proxy, port, user, password); - RequestConfig rcf = getProxyConfig(proxy, port); - httpPost.setConfig(rcf); - } else { - httpClient = getDefaultClient(); - } - - try { - CloseableHttpResponse response = httpClient.execute(httpPost); - String resp; - try { - HttpEntity entity = response.getEntity(); - resp = EntityUtils.toString(entity, charset); - EntityUtils.consume(entity); - } finally { - response.close(); - } - logger.info("Ding Talk send {}, resp: {}", msg, resp); - return resp; - } finally { - httpClient.close(); - } - } - private static HttpPost constructHttpPost(String url, String msg, String charset) { HttpPost post = new HttpPost(url); StringEntity entity = new StringEntity(msg, charset); @@ -155,27 +110,6 @@ public class DingTalkSender { return JSONUtils.toJsonString(items); } - public static class DingTalkSendMsgResponse { - private Integer errcode; - private String errmsg; - - public Integer getErrcode() { - return errcode; - } - - public void setErrcode(Integer errcode) { - this.errcode = errcode; - } - - public String getErrmsg() { - return errmsg; - } - - public void setErrmsg(String errmsg) { - this.errmsg = errmsg; - } - } - private static AlertResult checkSendDingTalkSendMsgResult(String result) { AlertResult alertResult = new AlertResult(); alertResult.setStatus("false"); @@ -201,4 +135,70 @@ public class DingTalkSender { return alertResult; } + public AlertResult sendDingTalkMsg(String title, String content) { + AlertResult alertResult; + try { + String resp = sendMsg(title, content); + return checkSendDingTalkSendMsgResult(resp); + } catch (Exception e) { + logger.info("send ding talk alert msg exception : {}", e.getMessage()); + alertResult = new AlertResult(); + alertResult.setStatus("false"); + alertResult.setMessage("send ding talk alert fail."); + } + return alertResult; + } + + private String sendMsg(String title, String content) throws IOException { + + String msgToJson = textToJsonString(title + content + "#" + keyword); + HttpPost httpPost = constructHttpPost(url, msgToJson, "UTF-8"); + + CloseableHttpClient httpClient; + if (Boolean.TRUE.equals(enableProxy)) { + httpClient = getProxyClient(proxy, port, user, password); + RequestConfig rcf = getProxyConfig(proxy, port); + httpPost.setConfig(rcf); + } else { + httpClient = getDefaultClient(); + } + + try { + CloseableHttpResponse response = httpClient.execute(httpPost); + String resp; + try { + HttpEntity entity = response.getEntity(); + resp = EntityUtils.toString(entity, "UTF-8"); + EntityUtils.consume(entity); + } finally { + response.close(); + } + logger.info("Ding Talk send title :{},content : {}, resp: {}", title, content, resp); + return resp; + } finally { + httpClient.close(); + } + } + + public static class DingTalkSendMsgResponse { + private Integer errcode; + private String errmsg; + + public Integer getErrcode() { + return errcode; + } + + public void setErrcode(Integer errcode) { + this.errcode = errcode; + } + + public String getErrmsg() { + return errmsg; + } + + public void setErrmsg(String errmsg) { + this.errmsg = errmsg; + } + } + } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/test/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSenderTest.java 
b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/test/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSenderTest.java index 4512402dc4..bc17d4185b 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/test/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSenderTest.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-dingtalk/src/test/java/org/apache/dolphinscheduler/plugin/alert/dingtalk/DingTalkSenderTest.java @@ -50,7 +50,7 @@ public class DingTalkSenderTest { dingTalkSender.sendDingTalkMsg("keyWord+Welcome", "UTF-8"); dingTalkConfig.put(DingTalkParamsConstants.NAME_DING_TALK_PROXY_ENABLE, "true"); dingTalkSender = new DingTalkSender(dingTalkConfig); - AlertResult alertResult = dingTalkSender.sendDingTalkMsg("keyWord+Welcome", "UTF-8"); + AlertResult alertResult = dingTalkSender.sendDingTalkMsg("title", "content test"); Assert.assertEquals("false",alertResult.getStatus()); } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannel.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannel.java index c793af5710..3dbf0b8fb9 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannel.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannel.java @@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel; import org.apache.dolphinscheduler.spi.alert.AlertData; import org.apache.dolphinscheduler.spi.alert.AlertInfo; import org.apache.dolphinscheduler.spi.alert.AlertResult; -import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; import java.util.Map; @@ -38,8 +37,10 @@ public class EmailAlertChannel implements AlertChannel { public AlertResult process(AlertInfo info) { AlertData alert = info.getAlertData(); - String alertParams = info.getAlertParams(); - Map paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams); + Map paramsMap = info.getAlertParams(); + if (null == paramsMap) { + return new AlertResult("false", "mail params is null"); + } MailSender mailSender = new MailSender(paramsMap); AlertResult alertResult = mailSender.sendMails(alert.getTitle(), alert.getContent()); diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/MailParamsConstants.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/MailParamsConstants.java index 0bbbe103f0..9b49b4705b 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/MailParamsConstants.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/main/java/org/apache/dolphinscheduler/plugin/alert/email/MailParamsConstants.java @@ -33,33 +33,33 @@ public class MailParamsConstants { public static final String NAME_PLUGIN_DEFAULT_EMAIL_RECEIVERCCS = "receiverCcs"; public static final String MAIL_PROTOCOL = "transport.protocol"; - public static final String NAME_MAIL_PROTOCOL = "protocol"; + public static final String NAME_MAIL_PROTOCOL = "mail.protocol"; - public static final String MAIL_SMTP_HOST = "smtp.host"; + public static final String 
MAIL_SMTP_HOST = "mail.smtp.host"; public static final String NAME_MAIL_SMTP_HOST = "serverHost"; - public static final String MAIL_SMTP_PORT = "smtp.port"; + public static final String MAIL_SMTP_PORT = "mail.smtp.port"; public static final String NAME_MAIL_SMTP_PORT = "serverPort"; - public static final String MAIL_SENDER = "sender"; + public static final String MAIL_SENDER = "mail.sender"; public static final String NAME_MAIL_SENDER = "sender"; - public static final String MAIL_SMTP_AUTH = "smtp.auth"; + public static final String MAIL_SMTP_AUTH = "mail.smtp.auth"; public static final String NAME_MAIL_SMTP_AUTH = "enableSmtpAuth"; - public static final String MAIL_USER = "user"; + public static final String MAIL_USER = "mail.user"; public static final String NAME_MAIL_USER = "user"; - public static final String MAIL_PASSWD = "passwd"; + public static final String MAIL_PASSWD = "mail.passwd"; public static final String NAME_MAIL_PASSWD = "passwd"; - public static final String MAIL_SMTP_STARTTLS_ENABLE = "smtp.starttls.enable"; + public static final String MAIL_SMTP_STARTTLS_ENABLE = "mail.smtp.starttls.enable"; public static final String NAME_MAIL_SMTP_STARTTLS_ENABLE = "starttlsEnable"; - public static final String MAIL_SMTP_SSL_ENABLE = "smtp.ssl.enable"; + public static final String MAIL_SMTP_SSL_ENABLE = "mail.smtp.ssl.enable"; public static final String NAME_MAIL_SMTP_SSL_ENABLE = "sslEnable"; - public static final String MAIL_SMTP_SSL_TRUST = "smtp.ssl.trust"; + public static final String MAIL_SMTP_SSL_TRUST = "mail.smtp.ssl.trust"; public static final String NAME_MAIL_SMTP_SSL_TRUST = "smtpSslTrust"; } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelFactoryTest.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelFactoryTest.java index 977cd8fefc..37a11e47fb 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelFactoryTest.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelFactoryTest.java @@ -19,13 +19,10 @@ package org.apache.dolphinscheduler.plugin.alert.email; import org.apache.dolphinscheduler.spi.alert.AlertChannel; import org.apache.dolphinscheduler.spi.params.base.PluginParams; -import org.apache.dolphinscheduler.spi.utils.JSONUtils; import java.util.List; -import org.junit.After; import org.junit.Assert; -import org.junit.Before; import org.junit.Test; /** @@ -36,29 +33,13 @@ import org.junit.Test; */ public class EmailAlertChannelFactoryTest { - @Before - public void before() throws Exception { - } - - @After - public void after() throws Exception { - } - - /** - * Method: getName() - */ - @Test - public void testGetName() throws Exception { - } - /** * Method: getParams() */ @Test - public void testGetParams() throws Exception { + public void testGetParams() { EmailAlertChannelFactory emailAlertChannelFactory = new EmailAlertChannelFactory(); List params = emailAlertChannelFactory.getParams(); - System.out.println(JSONUtils.toJsonString(params)); Assert.assertEquals(12, params.size()); } @@ -66,7 +47,7 @@ public class EmailAlertChannelFactoryTest { * Method: create() */ @Test - public void testCreate() throws Exception { + public void testCreate() { EmailAlertChannelFactory 
emailAlertChannelFactory = new EmailAlertChannelFactory(); AlertChannel alertChannel = emailAlertChannelFactory.create(); Assert.assertNotNull(alertChannel); diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelTest.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelTest.java index 97a1013399..fc28df272c 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelTest.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/EmailAlertChannelTest.java @@ -24,6 +24,7 @@ import org.apache.dolphinscheduler.spi.alert.AlertResult; import org.apache.dolphinscheduler.spi.alert.ShowType; import org.apache.dolphinscheduler.spi.params.InputParam; import org.apache.dolphinscheduler.spi.params.PasswordParam; +import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; import org.apache.dolphinscheduler.spi.params.RadioParam; import org.apache.dolphinscheduler.spi.params.base.DataType; import org.apache.dolphinscheduler.spi.params.base.ParamsOptions; @@ -34,6 +35,7 @@ import org.apache.dolphinscheduler.spi.utils.JSONUtils; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import org.junit.Assert; import org.junit.Test; @@ -66,7 +68,9 @@ public class EmailAlertChannelTest { .setTitle("test"); AlertInfo alertInfo = new AlertInfo(); alertInfo.setAlertData(alertData); - alertInfo.setAlertParams(getEmailAlertParams()); + Map paramsMap = PluginParamsTransfer.getPluginParamsMap(getEmailAlertParams()); + + alertInfo.setAlertParams(paramsMap); AlertResult alertResult = emailAlertChannel.process(alertInfo); Assert.assertNotNull(alertResult); Assert.assertEquals("false", alertResult.getStatus()); diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/MailUtilsTest.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/MailUtilsTest.java index e19c819dae..5b0b084d72 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/MailUtilsTest.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-email/src/test/java/org/apache/dolphinscheduler/plugin/alert/email/MailUtilsTest.java @@ -53,6 +53,7 @@ public class MailUtilsTest { emailConfig.put(MailParamsConstants.NAME_MAIL_SENDER, "xxx1.xxx.com"); emailConfig.put(MailParamsConstants.NAME_MAIL_USER, "xxx2.xxx.com"); emailConfig.put(MailParamsConstants.NAME_MAIL_PASSWD, "111111"); + emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_AUTH, "true"); emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_STARTTLS_ENABLE, "true"); emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_SSL_ENABLE, "false"); emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_SSL_TRUST, "false"); diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/pom.xml b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/pom.xml new file mode 100644 index 0000000000..44d4cdbb07 --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/pom.xml @@ -0,0 +1,82 @@ + + + + + dolphinscheduler-alert-plugin + org.apache.dolphinscheduler 
+ 1.3.4-SNAPSHOT + + 4.0.0 + + org.apache.dolphinscheduler + dolphinscheduler-alert-feishu + dolphinscheduler-plugin + + + + + org.apache.dolphinscheduler + dolphinscheduler-spi + provided + + + org.apache.httpcomponents + httpclient + + + com.google.guava + guava + + + + ch.qos.logback + logback-classic + + + + org.slf4j + slf4j-api + + + + com.fasterxml.jackson.core + jackson-annotations + provided + + + + junit + junit + test + + + + org.mockito + mockito-core + jar + test + + + + + dolphinscheduler-alert-feishu-${project.version} + + + \ No newline at end of file diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannel.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannel.java new file mode 100644 index 0000000000..3bbdaa9997 --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannel.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.dolphinscheduler.plugin.alert.feishu; + +import org.apache.dolphinscheduler.spi.alert.AlertChannel; +import org.apache.dolphinscheduler.spi.alert.AlertData; +import org.apache.dolphinscheduler.spi.alert.AlertInfo; +import org.apache.dolphinscheduler.spi.alert.AlertResult; + +import java.util.Map; + +public class FeiShuAlertChannel implements AlertChannel { + @Override + public AlertResult process(AlertInfo alertInfo) { + + AlertData alertData = alertInfo.getAlertData(); + Map paramsMap = alertInfo.getAlertParams(); + if (null == paramsMap) { + return new AlertResult("false", "fei shu params is null"); + } + return new FeiShuSender(paramsMap).sendFeiShuMsg(alertData); + } +} diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactory.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactory.java new file mode 100644 index 0000000000..0e863f95d4 --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactory.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.dolphinscheduler.plugin.alert.feishu; + +import org.apache.dolphinscheduler.spi.alert.AlertChannel; +import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory; +import org.apache.dolphinscheduler.spi.params.InputParam; +import org.apache.dolphinscheduler.spi.params.PasswordParam; +import org.apache.dolphinscheduler.spi.params.RadioParam; +import org.apache.dolphinscheduler.spi.params.base.ParamsOptions; +import org.apache.dolphinscheduler.spi.params.base.PluginParams; +import org.apache.dolphinscheduler.spi.params.base.Validate; + +import java.util.Arrays; +import java.util.List; + +public class FeiShuAlertChannelFactory implements AlertChannelFactory { + @Override + public String getName() { + return "Feishu"; + } + + @Override + public List getParams() { + InputParam webHookParam = InputParam.newBuilder(FeiShuParamsConstants.NAME_WEB_HOOK, FeiShuParamsConstants.WEB_HOOK) + .addValidate(Validate.newBuilder() + .setRequired(true) + .build()) + .build(); + RadioParam isEnableProxy = + RadioParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE, FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE) + .addParamsOptions(new ParamsOptions("YES", true, false)) + .addParamsOptions(new ParamsOptions("NO", false, false)) + .setValue(true) + .addValidate(Validate.newBuilder() + .setRequired(false) + .build()) + .build(); + InputParam proxyParam = + InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PROXY, FeiShuParamsConstants.FEI_SHU_PROXY) + .addValidate(Validate.newBuilder() + .setRequired(false).build()) + .build(); + + InputParam portParam = InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PORT, FeiShuParamsConstants.FEI_SHU_PORT) + .addValidate(Validate.newBuilder() + .setRequired(false).build()) + .build(); + + InputParam userParam = + InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_USER, FeiShuParamsConstants.FEI_SHU_USER) + .addValidate(Validate.newBuilder() + .setRequired(false).build()) + .build(); + PasswordParam passwordParam = PasswordParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PASSWORD, FeiShuParamsConstants.FEI_SHU_PASSWORD) + .setPlaceholder("if enable use authentication, you need input password") + .build(); + + return Arrays.asList(webHookParam, isEnableProxy, proxyParam, portParam, userParam, passwordParam); + + } + + @Override + public AlertChannel create() { + return new FeiShuAlertChannel(); + } +} diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/MsgManager.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertPlugin.java similarity index 64% rename from dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/MsgManager.java rename to dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertPlugin.java index 
e7fb161162..e71be3e2bd 100644 --- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/MsgManager.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertPlugin.java @@ -15,26 +15,16 @@ * limitations under the License. */ -package org.apache.dolphinscheduler.alert.manager; +package org.apache.dolphinscheduler.plugin.alert.feishu; -import org.apache.dolphinscheduler.dao.entity.Alert; +import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin; +import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import com.google.common.collect.ImmutableList; -/** - * SMS send manager - */ -public class MsgManager { - - private static final Logger logger = LoggerFactory.getLogger(MsgManager.class); - - /** - * SMS send - * - * @param alert the alert - */ - public void send(Alert alert) { - logger.info("send message {}", alert); +public class FeiShuAlertPlugin implements DolphinSchedulerPlugin { + @Override + public Iterable getAlertChannelFactorys() { + return ImmutableList.of(new FeiShuAlertChannelFactory()); } } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuParamsConstants.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuParamsConstants.java new file mode 100644 index 0000000000..0b3c329792 --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuParamsConstants.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.dolphinscheduler.plugin.alert.feishu; + +public class FeiShuParamsConstants { + + private FeiShuParamsConstants() { + throw new IllegalStateException("Utility class"); + } + + static final String WEB_HOOK = "webhook"; + + static final String NAME_WEB_HOOK = "webHook"; + + public static final String FEI_SHU_PROXY_ENABLE = "isEnableProxy"; + + static final String NAME_FEI_SHU_PROXY_ENABLE = "isEnableProxy"; + + static final String FEI_SHU_PROXY = "proxy"; + + static final String NAME_FEI_SHU_PROXY = "proxy"; + + static final String FEI_SHU_PORT = "port"; + + static final String NAME_FEI_SHU_PORT = "port"; + + static final String FEI_SHU_USER = "user"; + + static final String NAME_FEI_SHU_USER = "user"; + + static final String FEI_SHU_PASSWORD = "password"; + + static final String NAME_FEI_SHU_PASSWORD = "password"; +} diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java new file mode 100644 index 0000000000..8fdafe7182 --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSender.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.dolphinscheduler.plugin.alert.feishu; + +import org.apache.dolphinscheduler.spi.alert.AlertData; +import org.apache.dolphinscheduler.spi.alert.AlertResult; +import org.apache.dolphinscheduler.spi.utils.JSONUtils; + +import org.apache.commons.codec.binary.StringUtils; +import org.apache.http.HttpEntity; +import org.apache.http.HttpStatus; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public class FeiShuSender { + + private static final Logger logger = LoggerFactory.getLogger(FeiShuSender.class); + + private String url; + + private Boolean enableProxy; + + private String proxy; + + private Integer port; + + private String user; + + private String password; + + FeiShuSender(Map config) { + url = config.get(FeiShuParamsConstants.NAME_WEB_HOOK); + enableProxy = Boolean.valueOf(config.get(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE)); + if (Boolean.TRUE.equals(enableProxy)) { + port = Integer.parseInt(config.get(FeiShuParamsConstants.NAME_FEI_SHU_PORT)); + proxy = config.get(FeiShuParamsConstants.NAME_FEI_SHU_PROXY); + user = config.get(FeiShuParamsConstants.NAME_FEI_SHU_USER); + password = config.get(FeiShuParamsConstants.NAME_FEI_SHU_PASSWORD); + } + + } + + private static String textToJsonString(AlertData alertData) { + + Map items = new HashMap<>(2); + items.put("msg_type", "text"); + Map textContent = new HashMap<>(); + byte[] byt = StringUtils.getBytesUtf8(formatContent(alertData)); + String txt = StringUtils.newStringUtf8(byt); + textContent.put("text", txt); + items.put("content", textContent); + return JSONUtils.toJsonString(items); + } + + public static AlertResult checkSendFeiShuSendMsgResult(String result) { + AlertResult alertResult = new AlertResult(); + alertResult.setStatus("false"); + + if (org.apache.dolphinscheduler.spi.utils.StringUtils.isBlank(result)) { + alertResult.setMessage("send fei shu msg error"); + logger.info("send fei shu msg error,fei shu server resp is null"); + return alertResult; + } + FeiShuSendMsgResponse sendMsgResponse = JSONUtils.parseObject(result, FeiShuSendMsgResponse.class); + + if (null == sendMsgResponse) { + alertResult.setMessage("send fei shu msg fail"); + logger.info("send fei shu msg error,resp error"); + return alertResult; + } + if (sendMsgResponse.statusCode == 0) { + alertResult.setStatus("true"); + alertResult.setMessage("send fei shu msg success"); + return alertResult; + } + alertResult.setMessage(String.format("alert send fei shu msg error : %s", sendMsgResponse.getStatusMessage())); + logger.info("alert send fei shu msg error : {} ,Extra : {} ", sendMsgResponse.getStatusMessage(), sendMsgResponse.getExtra()); + return alertResult; + } + + public static String formatContent(AlertData alertData) { + if (alertData.getContent() != null) { + + List list = JSONUtils.toList(alertData.getContent(), Map.class); + if (list.isEmpty()) { + return alertData.getTitle() + alertData.getContent(); + } + + StringBuilder contents = new StringBuilder(100); + contents.append(String.format("`%s`%n", alertData.getTitle())); + for (Map map : list) { + Iterator> entries = 
map.entrySet().iterator(); + while (entries.hasNext()) { + Entry entry = entries.next(); + String key = entry.getKey(); + String value = entry.getValue().toString(); + contents.append(key + ":" + value); + contents.append("\n"); + } + } + return contents.toString(); + } + return null; + } + + public AlertResult sendFeiShuMsg(AlertData alertData) { + AlertResult alertResult; + try { + String resp = sendMsg(alertData); + return checkSendFeiShuSendMsgResult(resp); + } catch (Exception e) { + logger.info("send fei shu alert msg exception : {}", e.getMessage()); + alertResult = new AlertResult(); + alertResult.setStatus("false"); + alertResult.setMessage("send fei shu alert fail."); + } + return alertResult; + } + + private String sendMsg(AlertData alertData) throws IOException { + + String msgToJson = textToJsonString(alertData); + + HttpPost httpPost = HttpRequestUtil.constructHttpPost(url, msgToJson); + + CloseableHttpClient httpClient; + + httpClient = HttpRequestUtil.getHttpClient(enableProxy, proxy, port, user, password); + + try { + CloseableHttpResponse response = httpClient.execute(httpPost); + + int statusCode = response.getStatusLine().getStatusCode(); + if (statusCode != HttpStatus.SC_OK) { + logger.error("send feishu message error, return http status code: {} ", statusCode); + } + String resp; + try { + HttpEntity entity = response.getEntity(); + resp = EntityUtils.toString(entity, "utf-8"); + EntityUtils.consume(entity); + } finally { + response.close(); + } + logger.info("Fei Shu send title :{} ,content :{}, resp: {}", alertData.getTitle(), alertData.getContent(), resp); + return resp; + } finally { + httpClient.close(); + } + } + + public static class FeiShuSendMsgResponse { + @JsonProperty("Extra") + private String extra; + @JsonProperty("StatusCode") + private Integer statusCode; + @JsonProperty("StatusMessage") + private String statusMessage; + + public String getExtra() { + return extra; + } + + public void setExtra(String extra) { + this.extra = extra; + } + + public Integer getStatusCode() { + return statusCode; + } + + public void setStatusCode(Integer statusCode) { + this.statusCode = statusCode; + } + + public String getStatusMessage() { + return statusMessage; + } + + public void setStatusMessage(String statusMessage) { + this.statusMessage = statusMessage; + } + } + +} diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/HttpRequestUtil.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/HttpRequestUtil.java new file mode 100644 index 0000000000..3d143579d4 --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/main/java/org/apache/dolphinscheduler/plugin/alert/feishu/HttpRequestUtil.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.dolphinscheduler.plugin.alert.feishu; + +import org.apache.http.HttpHost; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.UsernamePasswordCredentials; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; + +public class HttpRequestUtil { + + public static CloseableHttpClient getHttpClient(boolean enableProxy, String proxy, Integer port, String user, String password) { + if (enableProxy) { + HttpHost httpProxy = new HttpHost(proxy, port); + CredentialsProvider provider = new BasicCredentialsProvider(); + provider.setCredentials(new AuthScope(httpProxy), new UsernamePasswordCredentials(user, password)); + return HttpClients.custom().setDefaultCredentialsProvider(provider).build(); + } else { + return HttpClients.createDefault(); + } + } + + public static HttpPost constructHttpPost(String url, String msg) { + HttpPost post = new HttpPost(url); + StringEntity entity = new StringEntity(msg, ContentType.APPLICATION_JSON); + post.setEntity(entity); + return post; + } +} diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java new file mode 100644 index 0000000000..d73355dbc0 --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.dolphinscheduler.plugin.alert.feishu; + +import org.apache.dolphinscheduler.spi.alert.AlertChannel; +import org.apache.dolphinscheduler.spi.params.base.PluginParams; +import org.apache.dolphinscheduler.spi.utils.JSONUtils; + +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; + +public class FeiShuAlertChannelFactoryTest { + + @Test + public void testGetParams() { + FeiShuAlertChannelFactory feiShuAlertChannelFactory = new FeiShuAlertChannelFactory(); + List params = feiShuAlertChannelFactory.getParams(); + JSONUtils.toJsonString(params); + Assert.assertEquals(6, params.size()); + } + + @Test + public void testCreate() { + FeiShuAlertChannelFactory feiShuAlertChannelFactory = new FeiShuAlertChannelFactory(); + AlertChannel alertChannel = feiShuAlertChannelFactory.create(); + Assert.assertNotNull(alertChannel); + } +} diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSenderTest.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSenderTest.java new file mode 100644 index 0000000000..05110d42fe --- /dev/null +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-feishu/src/test/java/org/apache/dolphinscheduler/plugin/alert/feishu/FeiShuSenderTest.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.dolphinscheduler.plugin.alert.feishu; + +import org.apache.dolphinscheduler.spi.alert.AlertData; +import org.apache.dolphinscheduler.spi.alert.AlertResult; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class FeiShuSenderTest { + + + private static Map feiShuConfig = new HashMap<>(); + + @Before + public void initFeiShuConfig() { + feiShuConfig.put(FeiShuParamsConstants.WEB_HOOK, "https://open.feishu.cn/open-apis/bot/v2/hook/xxxxx"); + } + + @Test + public void testSend() { + AlertData alertData = new AlertData(); + alertData.setTitle("feishu test title"); + alertData.setContent("feishu test content"); + FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig); + AlertResult alertResult = feiShuSender.sendFeiShuMsg(alertData); + Assert.assertEquals("false", alertResult.getStatus()); + } + + @Test + public void testFormatContent() { + String alertMsg = "[\n" + + " {\n" + + " \"owner\": \"dolphinscheduler\",\n" + + " \"processEndTime\": \"2021-01-29 19:01:11\",\n" + + " \"processHost\": \"10.81.129.4:5678\",\n" + + " \"processId\": 2926,\n" + + " \"processName\": \"3-20210129190038108\",\n" + + " \"processStartTime\": \"2021-01-29 19:00:38\",\n" + + " \"processState\": \"SUCCESS\",\n" + + " \"processType\": \"START_PROCESS\",\n" + + " \"projectId\": 2,\n" + + " \"projectName\": \"testdelproject\",\n" + + " \"recovery\": \"NO\",\n" + + " \"retryTimes\": 0,\n" + + " \"runTimes\": 1,\n" + + " \"taskId\": 0\n" + + " }\n" + + "]"; + AlertData alertData = new AlertData(); + alertData.setTitle(""); + alertData.setContent(alertMsg); + Assert.assertNotNull(FeiShuSender.formatContent(alertData)); + } + + @Test + public void testSendWithFormatException() { + AlertData alertData = new AlertData(); + alertData.setTitle("feishu test title"); + alertData.setContent("feishu test content"); + FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig); + String alertResult = feiShuSender.formatContent(alertData); + Assert.assertEquals(alertResult, alertData.getTitle() + alertData.getContent()); + } + + @Test + public void testCheckSendFeiShuSendMsgResult() { + + FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig); + AlertResult alertResult = feiShuSender.checkSendFeiShuSendMsgResult(""); + Assert.assertFalse(Boolean.valueOf(alertResult.getStatus())); + AlertResult alertResult2 = feiShuSender.checkSendFeiShuSendMsgResult("123"); + Assert.assertEquals("send fei shu msg fail",alertResult2.getMessage()); + + String response = "{\"StatusCode\":\"0\",\"extra\":\"extra\",\"StatusMessage\":\"StatusMessage\"}"; + AlertResult alertResult3 = feiShuSender.checkSendFeiShuSendMsgResult(response); + Assert.assertTrue(Boolean.valueOf(alertResult3.getStatus())); + } +} diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannel.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannel.java index 27bc1903d8..cb550b7541 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannel.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/main/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannel.java @@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel; import 
org.apache.dolphinscheduler.spi.alert.AlertData; import org.apache.dolphinscheduler.spi.alert.AlertInfo; import org.apache.dolphinscheduler.spi.alert.AlertResult; -import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; import java.util.Map; @@ -33,8 +32,10 @@ public class HttpAlertChannel implements AlertChannel { public AlertResult process(AlertInfo alertInfo) { AlertData alertData = alertInfo.getAlertData(); - String alertParams = alertInfo.getAlertParams(); - Map paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams); + Map paramsMap = alertInfo.getAlertParams(); + if (null == paramsMap) { + return new AlertResult("false", "http params is null"); + } return new HttpSender(paramsMap).send(alertData.getContent()); } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/test/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannelTest.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/test/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannelTest.java index 31a438b4fc..4d385e8a54 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/test/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannelTest.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-http/src/test/java/org/apache/dolphinscheduler/plugin/alert/http/HttpAlertChannelTest.java @@ -21,12 +21,14 @@ import org.apache.dolphinscheduler.spi.alert.AlertData; import org.apache.dolphinscheduler.spi.alert.AlertInfo; import org.apache.dolphinscheduler.spi.alert.AlertResult; import org.apache.dolphinscheduler.spi.params.InputParam; +import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; import org.apache.dolphinscheduler.spi.params.base.PluginParams; import org.apache.dolphinscheduler.spi.params.base.Validate; import org.apache.dolphinscheduler.spi.utils.JSONUtils; import java.util.ArrayList; import java.util.List; +import java.util.Map; import org.junit.Assert; import org.junit.Test; @@ -45,7 +47,7 @@ public class HttpAlertChannelTest { alertData.setContent("Fault tolerance warning"); alertInfo.setAlertData(alertData); AlertResult alertResult = alertChannel.process(alertInfo); - Assert.assertEquals("Request types are not supported", alertResult.getMessage()); + Assert.assertEquals("http params is null", alertResult.getMessage()); } @Test @@ -56,7 +58,8 @@ public class HttpAlertChannelTest { AlertData alertData = new AlertData(); alertData.setContent("Fault tolerance warning"); alertInfo.setAlertData(alertData); - alertInfo.setAlertParams(getParams()); + Map paramsMap = PluginParamsTransfer.getPluginParamsMap(getParams()); + alertInfo.setAlertParams(paramsMap); AlertResult alertResult = alertChannel.process(alertInfo); Assert.assertEquals("true", alertResult.getStatus()); } diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/main/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannel.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/main/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannel.java index 2a0021277d..dc6aa27e25 100644 --- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/main/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannel.java +++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/main/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannel.java @@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel; import 
org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
-import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
import java.util.Map;
@@ -33,8 +32,10 @@ public class ScriptAlertChannel implements AlertChannel {
    @Override
    public AlertResult process(AlertInfo alertinfo) {
        AlertData alertData = alertinfo.getAlertData();
-        String alertParams = alertinfo.getAlertParams();
-        Map paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
+        Map paramsMap = alertinfo.getAlertParams();
+        if (null == paramsMap) {
+            return new AlertResult("false", "script params is null");
+        }
        return new ScriptSender(paramsMap).sendScriptAlert(alertData.getTitle());
    }
}
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/test/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannelFactoryTest.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/test/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannelFactoryTest.java
index 72f2197315..8cedc2c38e 100644
--- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/test/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannelFactoryTest.java
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-script/src/test/java/org/apache/dolphinscheduler/plugin/alert/script/ScriptAlertChannelFactoryTest.java
@@ -19,7 +19,6 @@ package org.apache.dolphinscheduler.plugin.alert.script;
import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
-import org.apache.dolphinscheduler.spi.utils.JSONUtils;
import java.util.List;
@@ -35,7 +34,6 @@ public class ScriptAlertChannelFactoryTest {
    public void testGetParams() {
        ScriptAlertChannelFactory scriptAlertChannelFactory = new ScriptAlertChannelFactory();
        List params = scriptAlertChannelFactory.getParams();
-        JSONUtils.toJsonString(params);
        Assert.assertEquals(3, params.size());
    }
diff --git a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-wechat/src/main/java/org/apache/dolphinscheduler/plugin/alert/wechat/WeChatAlertChannel.java b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-wechat/src/main/java/org/apache/dolphinscheduler/plugin/alert/wechat/WeChatAlertChannel.java
index 4cdd4d375c..36cce09ff3 100644
--- a/dolphinscheduler-alert-plugin/dolphinscheduler-alert-wechat/src/main/java/org/apache/dolphinscheduler/plugin/alert/wechat/WeChatAlertChannel.java
+++ b/dolphinscheduler-alert-plugin/dolphinscheduler-alert-wechat/src/main/java/org/apache/dolphinscheduler/plugin/alert/wechat/WeChatAlertChannel.java
@@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
-import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
import java.util.Map;
@@ -33,8 +32,10 @@ public class WeChatAlertChannel implements AlertChannel {
    @Override
    public AlertResult process(AlertInfo info) {
        AlertData alertData = info.getAlertData();
-        String alertParams = info.getAlertParams();
-        Map paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
+        Map paramsMap = info.getAlertParams();
+        if (null == paramsMap) {
+            return new AlertResult("false", "we chat params is null");
+        }
        return new WeChatSender(paramsMap).sendEnterpriseWeChat(alertData.getTitle(),
alertData.getContent()); } diff --git a/dolphinscheduler-alert-plugin/pom.xml b/dolphinscheduler-alert-plugin/pom.xml index ede5051049..d199dc4e2c 100644 --- a/dolphinscheduler-alert-plugin/pom.xml +++ b/dolphinscheduler-alert-plugin/pom.xml @@ -35,6 +35,7 @@ dolphinscheduler-alert-dingtalk dolphinscheduler-alert-script dolphinscheduler-alert-http + dolphinscheduler-alert-feishu diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java index 54afc93442..b25cd57f4c 100644 --- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java +++ b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java @@ -29,7 +29,6 @@ import org.apache.dolphinscheduler.alert.utils.PropertyUtils; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.dao.DaoFactory; -import org.apache.dolphinscheduler.dao.PluginDao; import org.apache.dolphinscheduler.dao.entity.Alert; import org.apache.dolphinscheduler.remote.NettyRemotingServer; import org.apache.dolphinscheduler.remote.command.CommandType; @@ -53,8 +52,6 @@ public class AlertServer { */ private AlertDao alertDao = DaoFactory.getDaoInstance(AlertDao.class); - private PluginDao pluginDao = DaoFactory.getDaoInstance(PluginDao.class); - private AlertSender alertSender; private static AlertServer instance; @@ -114,7 +111,7 @@ public class AlertServer { NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(ALERT_RPC_PORT); this.server = new NettyRemotingServer(serverConfig); - this.server.registerProcessor(CommandType.ALERT_SEND_REQUEST, new AlertRequestProcessor(alertDao, alertPluginManager, pluginDao)); + this.server.registerProcessor(CommandType.ALERT_SEND_REQUEST, new AlertRequestProcessor(alertDao, alertPluginManager)); this.server.start(); } @@ -133,7 +130,7 @@ public class AlertServer { logger.warn("No Alert Plugin . Can not send alert info. "); } else { List alerts = alertDao.listWaitExecutionAlert(); - alertSender = new AlertSender(alerts, alertDao, alertPluginManager, pluginDao); + alertSender = new AlertSender(alerts, alertDao, alertPluginManager); alertSender.run(); } } diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java deleted file mode 100644 index 874b866759..0000000000 --- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/manager/EmailManager.java +++ /dev/null @@ -1,55 +0,0 @@ -///* -// * Licensed to the Apache Software Foundation (ASF) under one or more -// * contributor license agreements. See the NOTICE file distributed with -// * this work for additional information regarding copyright ownership. -// * The ASF licenses this file to You under the Apache License, Version 2.0 -// * (the "License"); you may not use this file except in compliance with -// * the License. You may obtain a copy of the License at -// * -// * http://www.apache.org/licenses/LICENSE-2.0 -// * -// * Unless required by applicable law or agreed to in writing, software -// * distributed under the License is distributed on an "AS IS" BASIS, -// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// * See the License for the specific language governing permissions and -// * limitations under the License. -// */ -//package org.apache.dolphinscheduler.alert.manager; -// -//import org.apache.dolphinscheduler.alert.utils.MailUtils; -// -//import java.util.List; -//import java.util.Map; -// -///** -// * email send manager -// */ -//public class EmailManager { -// /** -// * email send -// * @param receiversList the receiver list -// * @param receiversCcList the cc List -// * @param title the title -// * @param content the content -// * @param showType the showType -// * @return the send result -// */ -// public Map send(List receiversList,List receiversCcList,String title,String content,String showType){ -// -// return MailUtils.sendMails(receiversList, receiversCcList, title, content, showType); -// } -// -// /** -// * msg send -// * @param receiversList the receiver list -// * @param title the title -// * @param content the content -// * @param showType the showType -// * @return the send result -// */ -// public Map send(List receiversList,String title,String content,String showType){ -// -// return MailUtils.sendMails(receiversList,title, content, showType); -// } -// -//} diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/AlertPluginManager.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/AlertPluginManager.java index a660087c89..d795e71d52 100644 --- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/AlertPluginManager.java +++ b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/AlertPluginManager.java @@ -31,6 +31,7 @@ import org.apache.dolphinscheduler.spi.classloader.ThreadContextClassLoader; import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; import org.apache.dolphinscheduler.spi.params.base.PluginParams; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -47,6 +48,11 @@ public class AlertPluginManager extends AbstractDolphinPluginManager { private final Map alertChannelFactoryMap = new ConcurrentHashMap<>(); private final Map alertChannelMap = new ConcurrentHashMap<>(); + /** + * k->pluginDefineId v->pluginDefineName + */ + private final Map pluginDefineMap = new HashMap<>(); + public void addAlertChannelFactory(AlertChannelFactory alertChannelFactory) { requireNonNull(alertChannelFactory, "alertChannelFactory is null"); @@ -83,6 +89,10 @@ public class AlertPluginManager extends AbstractDolphinPluginManager { return alertChannelMap; } + public String getPluginNameById(int id) { + return pluginDefineMap.get(id); + } + @Override public void installPlugin(DolphinSchedulerPlugin dolphinSchedulerPlugin) { for (AlertChannelFactory alertChannelFactory : dolphinSchedulerPlugin.getAlertChannelFactorys()) { @@ -93,7 +103,8 @@ public class AlertPluginManager extends AbstractDolphinPluginManager { String paramsJson = PluginParamsTransfer.transferParamsToJson(params); PluginDefine pluginDefine = new PluginDefine(nameEn, PluginType.ALERT.getDesc(), paramsJson); - pluginDao.addOrUpdatePluginDefine(pluginDefine); + int id = pluginDao.addOrUpdatePluginDefine(pluginDefine); + pluginDefineMap.put(id, pluginDefine.getPluginName()); } } } diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessor.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessor.java index 
5e8a8f89d6..ec716d9878 100644 --- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessor.java +++ b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessor.java @@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.alert.plugin.AlertPluginManager; import org.apache.dolphinscheduler.alert.runner.AlertSender; import org.apache.dolphinscheduler.common.utils.Preconditions; import org.apache.dolphinscheduler.dao.AlertDao; -import org.apache.dolphinscheduler.dao.PluginDao; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.alert.AlertSendRequestCommand; @@ -35,18 +34,16 @@ import org.slf4j.LoggerFactory; import io.netty.channel.Channel; /** - * alert request processor + * alert request processor */ public class AlertRequestProcessor implements NettyRequestProcessor { private final Logger logger = LoggerFactory.getLogger(AlertRequestProcessor.class); private AlertDao alertDao; - private PluginDao pluginDao; private AlertPluginManager alertPluginManager; - public AlertRequestProcessor(AlertDao alertDao, AlertPluginManager alertPluginManager, PluginDao pluginDao) { + public AlertRequestProcessor(AlertDao alertDao, AlertPluginManager alertPluginManager) { this.alertDao = alertDao; - this.pluginDao = pluginDao; this.alertPluginManager = alertPluginManager; } @@ -59,7 +56,7 @@ public class AlertRequestProcessor implements NettyRequestProcessor { command.getBody(), AlertSendRequestCommand.class); logger.info("received command : {}", alertSendRequestCommand); - AlertSender alertSender = new AlertSender(alertDao, alertPluginManager, pluginDao); + AlertSender alertSender = new AlertSender(alertDao, alertPluginManager); AlertSendResponseCommand alertSendResponseCommand = alertSender.syncHandler(alertSendRequestCommand.getGroupId(), alertSendRequestCommand.getTitle(), alertSendRequestCommand.getContent()); channel.writeAndFlush(alertSendResponseCommand.convert2Command(command.getOpaque())); diff --git a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java index d635574543..114d01a845 100644 --- a/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java +++ b/dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/runner/AlertSender.java @@ -20,8 +20,8 @@ package org.apache.dolphinscheduler.alert.runner; import org.apache.dolphinscheduler.alert.plugin.AlertPluginManager; import org.apache.dolphinscheduler.common.enums.AlertStatus; import org.apache.dolphinscheduler.common.utils.CollectionUtils; +import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.AlertDao; -import org.apache.dolphinscheduler.dao.PluginDao; import org.apache.dolphinscheduler.dao.entity.Alert; import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance; import org.apache.dolphinscheduler.remote.command.alert.AlertSendResponseCommand; @@ -33,6 +33,7 @@ import org.apache.dolphinscheduler.spi.alert.AlertResult; import java.util.ArrayList; import java.util.List; +import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -46,25 +47,22 @@ public class AlertSender { private List alertList; private AlertDao alertDao; - private PluginDao pluginDao; private AlertPluginManager 
alertPluginManager; public AlertSender(AlertPluginManager alertPluginManager) { this.alertPluginManager = alertPluginManager; } - public AlertSender(AlertDao alertDao, AlertPluginManager alertPluginManager, PluginDao pluginDao) { + public AlertSender(AlertDao alertDao, AlertPluginManager alertPluginManager) { super(); this.alertDao = alertDao; - this.pluginDao = pluginDao; this.alertPluginManager = alertPluginManager; } - public AlertSender(List alertList, AlertDao alertDao, AlertPluginManager alertPluginManager, PluginDao pluginDao) { + public AlertSender(List alertList, AlertDao alertDao, AlertPluginManager alertPluginManager) { super(); this.alertList = alertList; this.alertDao = alertDao; - this.pluginDao = pluginDao; this.alertPluginManager = alertPluginManager; } @@ -75,13 +73,14 @@ public class AlertSender { List alertInstanceList = alertDao.listInstanceByAlertGroupId(alertGroupId); if (CollectionUtils.isEmpty(alertInstanceList)) { logger.error("send alert msg fail,no bind plugin instance."); - return; + alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, "no bind plugin instance", alert.getId()); + continue; } AlertData alertData = new AlertData(); alertData.setId(alert.getId()) - .setContent(alert.getContent()) - .setLog(alert.getLog()) - .setTitle(alert.getTitle()); + .setContent(alert.getContent()) + .setLog(alert.getLog()) + .setTitle(alert.getTitle()); for (AlertPluginInstance instance : alertInstanceList) { @@ -106,8 +105,8 @@ public class AlertSender { List alertInstanceList = alertDao.listInstanceByAlertGroupId(alertGroupId); AlertData alertData = new AlertData(); - alertData.setContent(title) - .setTitle(content); + alertData.setContent(content) + .setTitle(title); boolean sendResponseStatus = true; List sendResponseResults = new ArrayList<>(); @@ -126,7 +125,7 @@ public class AlertSender { for (AlertPluginInstance instance : alertInstanceList) { AlertResult alertResult = this.alertResultHandler(instance, alertData); AlertSendResponseResult alertSendResponseResult = new AlertSendResponseResult( - Boolean.parseBoolean(String.valueOf(alertResult.getStatus())), alertResult.getMessage()); + Boolean.parseBoolean(String.valueOf(alertResult.getStatus())), alertResult.getMessage()); sendResponseStatus = sendResponseStatus && alertSendResponseResult.getStatus(); sendResponseResults.add(alertSendResponseResult); } @@ -142,7 +141,7 @@ public class AlertSender { * @return AlertResult */ private AlertResult alertResultHandler(AlertPluginInstance instance, AlertData alertData) { - String pluginName = pluginDao.getPluginDefineById(instance.getPluginDefineId()).getPluginName(); + String pluginName = alertPluginManager.getPluginNameById(instance.getPluginDefineId()); AlertChannel alertChannel = alertPluginManager.getAlertChannelMap().get(pluginName); AlertResult alertResultExtend = new AlertResult(); String pluginInstanceName = instance.getInstanceName(); @@ -156,8 +155,16 @@ public class AlertSender { AlertInfo alertInfo = new AlertInfo(); alertInfo.setAlertData(alertData); - alertInfo.setAlertParams(instance.getPluginInstanceParams()); - AlertResult alertResult = alertChannel.process(alertInfo); + Map paramsMap = JSONUtils.toMap(instance.getPluginInstanceParams()); + alertInfo.setAlertParams(paramsMap); + AlertResult alertResult; + try { + alertResult = alertChannel.process(alertInfo); + } catch (Exception e) { + alertResult = new AlertResult("false", e.getMessage()); + logger.error("send alert error alert data id :{},", alertData.getId(), e); + } + if (alertResult == null) { String 
message = String.format("Alert Plugin %s send error : return alertResult value is null", pluginInstanceName); diff --git a/dolphinscheduler-alert/src/main/resources/alert.properties b/dolphinscheduler-alert/src/main/resources/alert.properties index 1e26d27fec..80ea87c2cd 100644 --- a/dolphinscheduler-alert/src/main/resources/alert.properties +++ b/dolphinscheduler-alert/src/main/resources/alert.properties @@ -21,7 +21,7 @@ #eg : Alert Server Listener port #alert.plugin.dir config the Alert Plugin dir . AlertServer while find and load the Alert Plugin Jar from this dir when deploy and start AlertServer on the server . -#eg :alert.plugin.dir=/opt/soft/spi/lib/plugin/alert +alert.plugin.dir=./lib/plugin/alert #maven.local.repository=/Users/gaojun/Documents/jianguoyun/localRepository diff --git a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/AlertServerTest.java b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/AlertServerTest.java index a8ead79be9..cdc779e35e 100644 --- a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/AlertServerTest.java +++ b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/AlertServerTest.java @@ -34,12 +34,13 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.Mockito; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; @RunWith(PowerMockRunner.class) -@PrepareForTest({AlertServer.class,DaoFactory.class}) +@PrepareForTest({AlertServer.class, DaoFactory.class}) public class AlertServerTest { @Before @@ -61,7 +62,8 @@ public class AlertServerTest { AlertPluginManager alertPluginManager = PowerMockito.mock(AlertPluginManager.class); PowerMockito.whenNew(AlertPluginManager.class).withNoArguments().thenReturn(alertPluginManager); ConcurrentHashMap alertChannelMap = new ConcurrentHashMap<>(); - alertChannelMap.put("pluginName",alertChannelMock); + alertChannelMap.put("pluginName", alertChannelMock); + PowerMockito.when(alertPluginManager.getPluginNameById(Mockito.anyInt())).thenReturn("pluginName"); PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap); DolphinPluginManagerConfig alertPluginManagerConfig = PowerMockito.mock(DolphinPluginManagerConfig.class); @@ -79,7 +81,8 @@ public class AlertServerTest { Assert.assertNotNull(alertServer); new Thread(() -> { - alertServer.start(); }) + alertServer.start(); + }) .start(); Thread.sleep(5 * Constants.ALERT_SCAN_INTERVAL); diff --git a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java index 3983545140..4f428d4667 100644 --- a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java +++ b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/plugin/EmailAlertPluginTest.java @@ -136,7 +136,7 @@ public class EmailAlertPluginTest { alertPluginInstance.setPluginInstanceParams(getEmailAlertParams()); alertDao.getAlertPluginInstanceMapper().insert(alertPluginInstance); - AlertSender alertSender = new AlertSender(alertList, alertDao, alertPluginManager, pluginDao); + AlertSender alertSender = new AlertSender(alertList, alertDao, alertPluginManager); alertSender.run(); Alert alertResult = 
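
The AlertSender hunks above drop the extra `PluginDao` round trip: the plugin name is resolved from `AlertPluginManager`'s id-to-name cache, the stored instance parameters are converted to a map with `JSONUtils.toMap`, and the `process` call is wrapped so a throwing or null-returning plugin is recorded as a failed send instead of aborting the loop. The following is a condensed, hedged sketch of that dispatch path; the class and interface names are stand-ins, and the failure messages are illustrative.

```java
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;

import java.util.Map;

public class AlertDispatchSketch {

    /** Narrow stand-in for the two AlertPluginManager methods this sketch relies on. */
    public interface PluginLookup {
        String getPluginNameById(int id);
        Map<String, AlertChannel> getAlertChannelMap();
    }

    /**
     * Condensed version of the handling in AlertSender#alertResultHandler:
     * resolve the channel through the in-memory plugin cache, build AlertInfo with an
     * already-parsed params map, and convert exceptions or null results into a failed
     * AlertResult so one misbehaving plugin cannot break the whole send run.
     */
    public AlertResult dispatch(PluginLookup manager, int pluginDefineId,
                                String pluginInstanceParamsJson, AlertData alertData) {
        String pluginName = manager.getPluginNameById(pluginDefineId);
        AlertChannel alertChannel = manager.getAlertChannelMap().get(pluginName);
        if (alertChannel == null) {
            return new AlertResult("false", "alert channel " + pluginName + " not found");
        }

        AlertInfo alertInfo = new AlertInfo();
        alertInfo.setAlertData(alertData);
        Map<String, String> paramsMap = JSONUtils.toMap(pluginInstanceParamsJson);
        alertInfo.setAlertParams(paramsMap);

        try {
            AlertResult result = alertChannel.process(alertInfo);
            return result != null ? result : new AlertResult("false", "return alertResult value is null");
        } catch (Exception e) {
            return new AlertResult("false", e.getMessage());
        }
    }
}
```
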
alertDao.getAlertMapper().selectById(alert1.getId()); diff --git a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessorTest.java b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessorTest.java index 0126eb3dae..052d2f3d55 100644 --- a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessorTest.java +++ b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/processor/AlertRequestProcessorTest.java @@ -19,7 +19,6 @@ package org.apache.dolphinscheduler.alert.processor; import org.apache.dolphinscheduler.alert.plugin.AlertPluginManager; import org.apache.dolphinscheduler.dao.AlertDao; -import org.apache.dolphinscheduler.dao.PluginDao; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.alert.AlertSendRequestCommand; @@ -37,7 +36,6 @@ import io.netty.channel.Channel; public class AlertRequestProcessorTest { private AlertDao alertDao; - private PluginDao pluginDao; private AlertPluginManager alertPluginManager; private AlertRequestProcessor alertRequestProcessor; @@ -45,17 +43,16 @@ public class AlertRequestProcessorTest { @Before public void before() { alertDao = PowerMockito.mock(AlertDao.class); - pluginDao = PowerMockito.mock(PluginDao.class); alertPluginManager = PowerMockito.mock(AlertPluginManager.class); - alertRequestProcessor = new AlertRequestProcessor(alertDao,alertPluginManager,pluginDao); + alertRequestProcessor = new AlertRequestProcessor(alertDao, alertPluginManager); } @Test public void testProcess() { Channel channel = PowerMockito.mock(Channel.class); - AlertSendRequestCommand alertSendRequestCommand = new AlertSendRequestCommand(1,"title","content"); + AlertSendRequestCommand alertSendRequestCommand = new AlertSendRequestCommand(1, "title", "content"); Command reqCommand = alertSendRequestCommand.convert2Command(); - Assert.assertEquals(CommandType.ALERT_SEND_REQUEST,reqCommand.getType()); - alertRequestProcessor.process(channel,reqCommand); + Assert.assertEquals(CommandType.ALERT_SEND_REQUEST, reqCommand.getType()); + alertRequestProcessor.process(channel, reqCommand); } } diff --git a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/runner/AlertSenderTest.java b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/runner/AlertSenderTest.java index 2664bdcd29..3b84bdbe67 100644 --- a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/runner/AlertSenderTest.java +++ b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/runner/AlertSenderTest.java @@ -67,7 +67,7 @@ public class AlertSenderTest { int alertGroupId = 1; String title = "alert mail test title"; String content = "alert mail test content"; - alertSender = new AlertSender(alertDao,alertPluginManager,pluginDao); + alertSender = new AlertSender(alertDao, alertPluginManager); //1.alert instance does not exist PowerMockito.when(alertDao.listInstanceByAlertGroupId(alertGroupId)).thenReturn(null); @@ -75,7 +75,7 @@ public class AlertSenderTest { AlertSendResponseCommand alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content); Assert.assertFalse(alertSendResponseCommand.getResStatus()); alertSendResponseCommand.getResResults().forEach(result -> - logger.info("alert send response result, status:{}, 
message:{}",result.getStatus(),result.getMessage())); + logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage())); //2.alert plugin does not exist int pluginDefineId = 1; @@ -83,30 +83,31 @@ public class AlertSenderTest { String pluginInstanceName = "alert-instance-mail"; List alertInstanceList = new ArrayList<>(); AlertPluginInstance alertPluginInstance = new AlertPluginInstance( - pluginDefineId,pluginInstanceParams,pluginInstanceName); + pluginDefineId, pluginInstanceParams, pluginInstanceName); alertInstanceList.add(alertPluginInstance); PowerMockito.when(alertDao.listInstanceByAlertGroupId(1)).thenReturn(alertInstanceList); String pluginName = "alert-plugin-mail"; - PluginDefine pluginDefine = new PluginDefine(pluginName,"1",null); + PluginDefine pluginDefine = new PluginDefine(pluginName, "1", null); PowerMockito.when(pluginDao.getPluginDefineById(pluginDefineId)).thenReturn(pluginDefine); alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content); Assert.assertFalse(alertSendResponseCommand.getResStatus()); alertSendResponseCommand.getResResults().forEach(result -> - logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage())); + logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage())); //3.alert result value is null AlertChannel alertChannelMock = PowerMockito.mock(AlertChannel.class); PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(null); Map alertChannelMap = new ConcurrentHashMap<>(); - alertChannelMap.put(pluginName,alertChannelMock); + alertChannelMap.put(pluginName, alertChannelMock); PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap); + PowerMockito.when(alertPluginManager.getPluginNameById(Mockito.anyInt())).thenReturn("alert-plugin-mail"); alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content); Assert.assertFalse(alertSendResponseCommand.getResStatus()); alertSendResponseCommand.getResResults().forEach(result -> - logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage())); + logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage())); //4.abnormal information inside the alert plug-in code AlertResult alertResult = new AlertResult(); @@ -114,27 +115,27 @@ public class AlertSenderTest { alertResult.setMessage("Abnormal information inside the alert plug-in code"); PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(alertResult); alertChannelMap = new ConcurrentHashMap<>(); - alertChannelMap.put(pluginName,alertChannelMock); + alertChannelMap.put(pluginName, alertChannelMock); PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap); alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content); Assert.assertFalse(alertSendResponseCommand.getResStatus()); alertSendResponseCommand.getResResults().forEach(result -> - logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage())); + logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage())); //5.alert plugin send success alertResult = new AlertResult(); alertResult.setStatus(String.valueOf(true)); - alertResult.setMessage(String.format("Alert Plugin %s send success",pluginInstanceName)); + alertResult.setMessage(String.format("Alert Plugin 
%s send success", pluginInstanceName)); PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(alertResult); alertChannelMap = new ConcurrentHashMap<>(); - alertChannelMap.put(pluginName,alertChannelMock); + alertChannelMap.put(pluginName, alertChannelMock); PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap); alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content); Assert.assertTrue(alertSendResponseCommand.getResStatus()); alertSendResponseCommand.getResResults().forEach(result -> - logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage())); + logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage())); } @@ -150,28 +151,29 @@ public class AlertSenderTest { alert.setContent(content); alertList.add(alert); - alertSender = new AlertSender(alertList,alertDao,alertPluginManager,pluginDao); + alertSender = new AlertSender(alertList, alertDao, alertPluginManager); int pluginDefineId = 1; String pluginInstanceParams = "alert-instance-mail-params"; String pluginInstanceName = "alert-instance-mail"; List alertInstanceList = new ArrayList<>(); AlertPluginInstance alertPluginInstance = new AlertPluginInstance( - pluginDefineId,pluginInstanceParams,pluginInstanceName); + pluginDefineId, pluginInstanceParams, pluginInstanceName); alertInstanceList.add(alertPluginInstance); PowerMockito.when(alertDao.listInstanceByAlertGroupId(alertGroupId)).thenReturn(alertInstanceList); String pluginName = "alert-plugin-mail"; - PluginDefine pluginDefine = new PluginDefine(pluginName,"1",null); + PluginDefine pluginDefine = new PluginDefine(pluginName, "1", null); PowerMockito.when(pluginDao.getPluginDefineById(pluginDefineId)).thenReturn(pluginDefine); + PowerMockito.when(alertPluginManager.getPluginNameById(1)).thenReturn("alert-instance-mail"); AlertResult alertResult = new AlertResult(); alertResult.setStatus(String.valueOf(true)); - alertResult.setMessage(String.format("Alert Plugin %s send success",pluginInstanceName)); + alertResult.setMessage(String.format("Alert Plugin %s send success", pluginInstanceName)); AlertChannel alertChannelMock = PowerMockito.mock(AlertChannel.class); PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(alertResult); ConcurrentHashMap alertChannelMap = new ConcurrentHashMap<>(); - alertChannelMap.put(pluginName,alertChannelMock); + alertChannelMap.put(pluginName, alertChannelMock); PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap); Assert.assertTrue(Boolean.parseBoolean(alertResult.getStatus())); alertSender.run(); diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java index a67ade2ba7..9fbe8f4e0a 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java @@ -96,10 +96,14 @@ public class DataSourceController extends BaseController { @ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"), @ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"), @ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"), + 
@ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"), @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"), @ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"), @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"), - @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String") + @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"), + @ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"), + @ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"), + @ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String") }) @PostMapping(value = "/create") @ResponseStatus(HttpStatus.CREATED) @@ -115,10 +119,14 @@ public class DataSourceController extends BaseController { @RequestParam(value = "userName") String userName, @RequestParam(value = "password") String password, @RequestParam(value = "connectType") DbConnectType connectType, - @RequestParam(value = "other") String other) { + @RequestParam(value = "other") String other, + @RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf, + @RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername, + @RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) { logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {}, port: {}, database : {}, principal: {}, userName : {}, connectType: {}, other: {}", loginUser.getUserName(), name, note, type, host, port, database, principal, userName, connectType, other); - String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other); + String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other, + javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath); return dataSourceService.createDataSource(loginUser, name, note, type, parameter); } @@ -149,10 +157,14 @@ public class DataSourceController extends BaseController { @ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"), @ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"), @ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"), + @ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"), @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"), @ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"), @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"), - @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String") + @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"), + @ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"), + @ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"), + @ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType 
= "String") }) @PostMapping(value = "/update") @ResponseStatus(HttpStatus.OK) @@ -169,10 +181,14 @@ public class DataSourceController extends BaseController { @RequestParam(value = "userName") String userName, @RequestParam(value = "password") String password, @RequestParam(value = "connectType") DbConnectType connectType, - @RequestParam(value = "other") String other) { + @RequestParam(value = "other") String other, + @RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf, + @RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername, + @RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) { logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, connectType: {}, other: {}", loginUser.getUserName(), name, note, type, connectType, other); - String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other); + String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other, + javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath); return dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter); } @@ -274,10 +290,14 @@ public class DataSourceController extends BaseController { @ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"), @ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"), @ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"), + @ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"), @ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"), @ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"), @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"), - @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String") + @ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"), + @ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"), + @ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"), + @ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String") }) @PostMapping(value = "/connect") @ResponseStatus(HttpStatus.OK) @@ -293,10 +313,14 @@ public class DataSourceController extends BaseController { @RequestParam(value = "userName") String userName, @RequestParam(value = "password") String password, @RequestParam(value = "connectType") DbConnectType connectType, - @RequestParam(value = "other") String other) { + @RequestParam(value = "other") String other, + @RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf, + @RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername, + @RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) { logger.info("login user {}, connect datasource: {}, note: {}, type: {}, connectType: {}, other: {}", loginUser.getUserName(), name, note, type, connectType, other); - String parameter = dataSourceService.buildParameter(type, host, port, database, principal, 
userName, password, connectType, other); + String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other, + javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath); return dataSourceService.checkConnection(type, parameter); } diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java index 04ef02303d..895a3ded59 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java @@ -133,7 +133,7 @@ public enum Status { QUERY_TASK_INSTANCE_LOG_ERROR(10103, "view task instance log error", "查询任务实例日志错误"), DOWNLOAD_TASK_INSTANCE_LOG_FILE_ERROR(10104, "download task instance log file error", "下载任务日志文件错误"), CREATE_PROCESS_DEFINITION(10105, "create process definition", "创建工作流错误"), - VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106, "verify process definition name unique error", "工作流定义名称已存在"), + VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106, "verify process definition name unique error", "工作流定义名称验证错误"), UPDATE_PROCESS_DEFINITION_ERROR(10107, "update process definition error", "更新工作流定义错误"), RELEASE_PROCESS_DEFINITION_ERROR(10108, "release process definition error", "上线工作流错误"), QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR(10109, "query datail of process definition error", "查询工作流详细信息错误"), @@ -199,6 +199,7 @@ public enum Status { FORCE_TASK_SUCCESS_ERROR(10165, "force task success error", "强制成功任务实例错误"), TASK_INSTANCE_STATE_OPERATION_ERROR(10166, "the status of task instance {0} is {1},Cannot perform force success operation", "任务实例[{0}]的状态是[{1}],无法执行强制成功操作"), DATASOURCE_TYPE_NOT_EXIST(10167, "data source type not exist", "数据源类型不存在"), + PROCESS_DEFINITION_NAME_EXIST(10168, "process definition name {0} already exists", "工作流定义名称[{0}]已存在"), UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"), UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"), @@ -290,6 +291,7 @@ public enum Status { QUERY_ALL_ALERT_PLUGIN_INSTANCE_ERROR(110009, "query all alert plugin instance error", "查询所有告警实例失败"), PLUGIN_INSTANCE_ALREADY_EXIT(110010,"plugin instance already exit","该告警插件实例已存在"), LIST_PAGING_ALERT_PLUGIN_INSTANCE_ERROR(110011,"query plugin instance page error","分页查询告警实例失败"), + DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED(110012,"failed to delete the alert instance, there is an alarm group associated with this alert instance","删除告警实例失败,存在与此告警实例关联的警报组") ; diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LocaleChangeInterceptor.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LocaleChangeInterceptor.java index 3fe236e065..2ed4e6f24b 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LocaleChangeInterceptor.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/interceptor/LocaleChangeInterceptor.java @@ -17,7 +17,6 @@ package org.apache.dolphinscheduler.api.interceptor; -import org.apache.dolphinscheduler.api.service.BaseService; import org.apache.dolphinscheduler.common.Constants; import java.util.Locale; @@ -30,12 +29,13 @@ import org.springframework.context.i18n.LocaleContextHolder; import org.springframework.lang.Nullable; import org.springframework.util.StringUtils; import 
org.springframework.web.servlet.handler.HandlerInterceptorAdapter; +import org.springframework.web.util.WebUtils; public class LocaleChangeInterceptor extends HandlerInterceptorAdapter { @Override public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) { - Cookie cookie = BaseService.getCookie(request, Constants.LOCALE_LANGUAGE); + Cookie cookie = WebUtils.getCookie(request, Constants.LOCALE_LANGUAGE); if (cookie != null) { // Proceed in cookie return true; diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseService.java index c61cd1eeb5..55826bf3d2 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseService.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/BaseService.java @@ -113,27 +113,6 @@ public class BaseService { return false; } - - /** - * get cookie info by name - * - * @param request request - * @param name 'sessionId' - * @return get cookie info - */ - public static Cookie getCookie(HttpServletRequest request, String name) { - Cookie[] cookies = request.getCookies(); - if (cookies != null && cookies.length > 0) { - for (Cookie cookie : cookies) { - if (StringUtils.isNotEmpty(name) && name.equalsIgnoreCase(cookie.getName())) { - return cookie; - } - } - } - - return null; - } - /** * create tenant dir if not exists * diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java index 58bb657c6f..2ca9cbea6a 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java @@ -267,6 +267,9 @@ public class DataSourceService extends BaseService { map.put(HOST, host); map.put(PORT, port); map.put(PRINCIPAL, datasourceForm.getPrincipal()); + map.put(Constants.KERBEROS_KRB5_CONF_PATH, datasourceForm.getJavaSecurityKrb5Conf()); + map.put(Constants.KERBEROS_KEY_TAB_USERNAME, datasourceForm.getLoginUserKeytabUsername()); + map.put(Constants.KERBEROS_KEY_TAB_PATH, datasourceForm.getLoginUserKeytabPath()); map.put(DATABASE, database); map.put(USER_NAME, datasourceForm.getUser()); map.put(OTHER, otherMap); @@ -424,7 +427,8 @@ public class DataSourceService extends BaseService { */ public String buildParameter(DbType type, String host, String port, String database, String principal, String userName, - String password, DbConnectType connectType, String other) { + String password, DbConnectType connectType, String other, + String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) { String address = buildAddress(type, host, port, connectType); Map parameterMap = new LinkedHashMap(6); @@ -467,6 +471,9 @@ public class DataSourceService extends BaseService { if (CommonUtils.getKerberosStartupState() && (type == DbType.HIVE || type == DbType.SPARK)) { parameterMap.put(Constants.PRINCIPAL, principal); + parameterMap.put(Constants.KERBEROS_KRB5_CONF_PATH, javaSecurityKrb5Conf); + parameterMap.put(Constants.KERBEROS_KEY_TAB_USERNAME, loginUserKeytabUsername); + parameterMap.put(Constants.KERBEROS_KEY_TAB_PATH, loginUserKeytabPath); } Map map = JSONUtils.toMap(other); diff --git 
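
The datasource hunks above thread three optional Kerberos fields (`javaSecurityKrb5Conf`, `loginUserKeytabUsername`, `loginUserKeytabPath`) from the controller endpoints into `DataSourceService#buildParameter`, which only writes them into the stored connection-parameter JSON when Kerberos startup is enabled and the source type is HIVE or SPARK. A simplified sketch of that guarded step is shown below; constant and method names follow the patch, package locations are assumed from the common module, and the surrounding method is reduced to the Kerberos-relevant part.

```java
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;

import java.util.LinkedHashMap;
import java.util.Map;

public class KerberosParameterSketch {

    /**
     * Simplified extract of the buildParameter change: the Kerberos settings are only
     * persisted for HIVE/SPARK datasources when Kerberos startup is switched on.
     */
    public static String buildKerberosAwareParameter(DbType type, String principal,
                                                     String javaSecurityKrb5Conf,
                                                     String loginUserKeytabUsername,
                                                     String loginUserKeytabPath) {
        Map<String, Object> parameterMap = new LinkedHashMap<>();
        if (CommonUtils.getKerberosStartupState() && (type == DbType.HIVE || type == DbType.SPARK)) {
            parameterMap.put(Constants.PRINCIPAL, principal);
            parameterMap.put(Constants.KERBEROS_KRB5_CONF_PATH, javaSecurityKrb5Conf);
            parameterMap.put(Constants.KERBEROS_KEY_TAB_USERNAME, loginUserKeytabUsername);
            parameterMap.put(Constants.KERBEROS_KEY_TAB_PATH, loginUserKeytabPath);
        }
        return JSONUtils.toJsonString(parameterMap);
    }
}
```
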
a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java index 1d6169b06e..6458a768d8 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java @@ -256,7 +256,7 @@ public class ProcessInstanceService extends BaseService { List processInstances = processInstanceList.getRecords(); for (ProcessInstance processInstance : processInstances) { - processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime())); + processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime())); User executor = usersService.queryUser(processInstance.getExecutorId()); if (null != executor) { processInstance.setExecutorName(executor.getUserName()); @@ -428,10 +428,9 @@ public class ProcessInstanceService extends BaseService { return result; } Date schedule = null; + schedule = processInstance.getScheduleTime(); if (scheduleTime != null) { schedule = DateUtils.getScheduleDate(scheduleTime); - } else { - schedule = processInstance.getScheduleTime(); } processInstance.setScheduleTime(schedule); processInstance.setLocations(locations); @@ -460,13 +459,18 @@ public class ProcessInstanceService extends BaseService { if (tenant != null) { processInstance.setTenantCode(tenant.getTenantCode()); } + // get the processinstancejson before saving,and then save the name and taskid + String oldJson = processInstance.getProcessInstanceJson(); + if (StringUtils.isNotEmpty(oldJson)) { + processInstanceJson = processService.changeJson(processData,oldJson); + } processInstance.setProcessInstanceJson(processInstanceJson); processInstance.setGlobalParams(globalParams); } int update = processService.updateProcessInstance(processInstance); int updateDefine = 1; - if (Boolean.TRUE.equals(syncDefine) && StringUtils.isNotEmpty(processInstanceJson)) { + if (Boolean.TRUE.equals(syncDefine)) { processDefinition.setProcessDefinitionJson(processInstanceJson); processDefinition.setGlobalParams(originDefParams); processDefinition.setLocations(locations); diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java index dd2caff3b6..6c68202313 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskInstanceService.java @@ -131,7 +131,7 @@ public class TaskInstanceService extends BaseService { List taskInstanceList = taskInstanceIPage.getRecords(); for (TaskInstance taskInstance : taskInstanceList) { - taskInstance.setDuration(DateUtils.differSec(taskInstance.getStartTime(), taskInstance.getEndTime())); + taskInstance.setDuration(DateUtils.format2Duration(taskInstance.getStartTime(), taskInstance.getEndTime())); User executor = usersService.queryUser(taskInstance.getExecutorId()); if (null != executor) { taskInstance.setExecutorName(executor.getUserName()); diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java 
b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java index 83cbd197cd..1c634a9cd2 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java @@ -153,19 +153,21 @@ public class WorkerGroupService extends BaseService { } } - // available workerGroup list - List availableWorkerGroupList = new ArrayList<>(); - for (String workerGroup : workerGroupList) { String workerGroupPath = workerPath + "/" + workerGroup; List childrenNodes = zookeeperCachedOperator.getChildrenKeys(workerGroupPath); + String timeStamp = ""; + for (int i = 0; i < childrenNodes.size(); i++) { + String ip = childrenNodes.get(i); + childrenNodes.set(i, ip.substring(0, ip.lastIndexOf(":"))); + timeStamp = ip.substring(ip.lastIndexOf(":")); + } if (CollectionUtils.isNotEmpty(childrenNodes)) { - availableWorkerGroupList.add(workerGroup); WorkerGroup wg = new WorkerGroup(); wg.setName(workerGroup); if (isPaging) { wg.setIpList(childrenNodes); - String registeredIpValue = zookeeperCachedOperator.get(workerGroupPath + "/" + childrenNodes.get(0)); + String registeredIpValue = zookeeperCachedOperator.get(workerGroupPath + "/" + childrenNodes.get(0) + timeStamp); wg.setCreateTime(DateUtils.stringToDate(registeredIpValue.split(",")[6])); wg.setUpdateTime(DateUtils.stringToDate(registeredIpValue.split(",")[7])); } diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/AlertPluginInstanceServiceImpl.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/AlertPluginInstanceServiceImpl.java index 8004a90fae..4d3d35a76e 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/AlertPluginInstanceServiceImpl.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/AlertPluginInstanceServiceImpl.java @@ -24,16 +24,25 @@ import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.vo.AlertPluginInstanceVO; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.utils.CollectionUtils; +import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance; import org.apache.dolphinscheduler.dao.entity.PluginDefine; import org.apache.dolphinscheduler.dao.entity.User; +import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.PluginDefineMapper; +import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; +import org.apache.dolphinscheduler.spi.params.base.PluginParams; + +import org.apache.commons.collections4.MapUtils; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.function.Function; import java.util.stream.Collectors; import org.springframework.beans.factory.annotation.Autowired; @@ -56,6 +65,9 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert @Autowired private PluginDefineMapper pluginDefineMapper; + @Autowired + private AlertGroupMapper alertGroupMapper; + /** * creat alert plugin instance * @@ -67,7 +79,8 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert 
@Override public Map create(User loginUser, int pluginDefineId, String instanceName, String pluginInstanceParams) { AlertPluginInstance alertPluginInstance = new AlertPluginInstance(); - alertPluginInstance.setPluginInstanceParams(pluginInstanceParams); + String paramsMapJson = parsePluginParamsMap(pluginInstanceParams); + alertPluginInstance.setPluginInstanceParams(paramsMapJson); alertPluginInstance.setInstanceName(instanceName); alertPluginInstance.setPluginDefineId(pluginDefineId); @@ -82,7 +95,9 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert if (i > 0) { putMsg(result, Status.SUCCESS); + return result; } + putMsg(result, Status.SAVE_ERROR); return result; } @@ -98,7 +113,8 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert public Map update(User loginUser, int pluginInstanceId, String instanceName, String pluginInstanceParams) { AlertPluginInstance alertPluginInstance = new AlertPluginInstance(); - alertPluginInstance.setPluginInstanceParams(pluginInstanceParams); + String paramsMapJson = parsePluginParamsMap(pluginInstanceParams); + alertPluginInstance.setPluginInstanceParams(paramsMapJson); alertPluginInstance.setInstanceName(instanceName); alertPluginInstance.setId(pluginInstanceId); Map result = new HashMap<>(); @@ -106,8 +122,9 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert if (i > 0) { putMsg(result, Status.SUCCESS); + return result; } - + putMsg(result, Status.SAVE_ERROR); return result; } @@ -121,6 +138,13 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert @Override public Map delete(User loginUser, int id) { Map result = new HashMap<>(); + //check if there is an associated alert group + boolean hasAssociatedAlertGroup = checkHasAssociatedAlertGroup(String.valueOf(id)); + if (hasAssociatedAlertGroup) { + putMsg(result, Status.DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED); + return result; + } + int i = alertPluginInstanceMapper.deleteById(id); if (i > 0) { putMsg(result, Status.SUCCESS); @@ -188,21 +212,73 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert if (CollectionUtils.isEmpty(pluginDefineList)) { return null; } - Map pluginDefineMap = pluginDefineList.stream().collect(Collectors.toMap(PluginDefine::getId, PluginDefine::getPluginName)); + Map pluginDefineMap = pluginDefineList.stream().collect(Collectors.toMap(PluginDefine::getId, Function.identity())); List alertPluginInstanceVOS = new ArrayList<>(); alertPluginInstances.forEach(alertPluginInstance -> { AlertPluginInstanceVO alertPluginInstanceVO = new AlertPluginInstanceVO(); - alertPluginInstanceVO.setAlertPluginName(pluginDefineMap.get(alertPluginInstance.getPluginDefineId())); + alertPluginInstanceVO.setCreateTime(alertPluginInstance.getCreateTime()); alertPluginInstanceVO.setUpdateTime(alertPluginInstance.getUpdateTime()); alertPluginInstanceVO.setPluginDefineId(alertPluginInstance.getPluginDefineId()); alertPluginInstanceVO.setInstanceName(alertPluginInstance.getInstanceName()); alertPluginInstanceVO.setId(alertPluginInstance.getId()); + PluginDefine pluginDefine = pluginDefineMap.get(alertPluginInstance.getPluginDefineId()); + //FIXME When the user removes the plug-in, this will happen. At this time, maybe we should add a new field to indicate that the plug-in has expired? 
+ if (null == pluginDefine) { + return; + } + alertPluginInstanceVO.setAlertPluginName(pluginDefine.getPluginName()); //todo List pages do not recommend returning this parameter - alertPluginInstanceVO.setPluginInstanceParams(alertPluginInstance.getPluginInstanceParams()); + String pluginParamsMapString = alertPluginInstance.getPluginInstanceParams(); + String uiPluginParams = parseToPluginUiParams(pluginParamsMapString, pluginDefine.getPluginParams()); + alertPluginInstanceVO.setPluginInstanceParams(uiPluginParams); alertPluginInstanceVOS.add(alertPluginInstanceVO); }); return alertPluginInstanceVOS; } + + /** + * Get the parameters actually needed by the plugin + * + * @param pluginParams Complete parameters(include ui) + * @return k, v(json string) + */ + private String parsePluginParamsMap(String pluginParams) { + Map paramsMap = PluginParamsTransfer.getPluginParamsMap(pluginParams); + return JSONUtils.toJsonString(paramsMap); + } + + /** + * parseToPluginUiParams + * + * @param pluginParamsMapString k-v data + * @param pluginUiParams Complete parameters(include ui) + * @return Complete parameters list(include ui) + */ + private String parseToPluginUiParams(String pluginParamsMapString, String pluginUiParams) { + Map paramsMap = JSONUtils.toMap(pluginParamsMapString); + if (MapUtils.isEmpty(paramsMap)) { + return null; + } + List pluginParamsList = JSONUtils.toList(pluginUiParams, PluginParams.class); + List newPluginParamsList = new ArrayList<>(pluginParamsList.size()); + pluginParamsList.forEach(pluginParams -> { + pluginParams.setValue(paramsMap.get(pluginParams.getName())); + newPluginParamsList.add(pluginParams); + + }); + + return JSONUtils.toJsonString(newPluginParamsList); + } + + private boolean checkHasAssociatedAlertGroup(String id) { + List idsList = alertGroupMapper.queryInstanceIdsList(); + if (CollectionUtils.isEmpty(idsList)) { + return false; + } + Optional first = idsList.stream().filter(k -> null != k && Arrays.asList(k.split(",")).contains(id)).findFirst(); + return first.isPresent(); + } + } diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java index 9810674863..2a2ae78618 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java @@ -429,11 +429,13 @@ public class ProcessDefinitionServiceImpl extends BaseService implements // check whether the new process define name exist ProcessDefinition definition = processDefineMapper.verifyByDefineName(project.getId(), name); if (definition != null) { - putMsg(result, Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR, name); + putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } - + // get the processdefinitionjson before saving,and then save the name and taskid + String oldJson = processDefine.getProcessDefinitionJson(); + processDefinitionJson = processService.changeJson(processData,oldJson); Date now = new Date(); processDefine.setId(id); @@ -495,7 +497,7 @@ public class ProcessDefinitionServiceImpl extends BaseService implements if (processDefinition == null) { putMsg(result, Status.SUCCESS); } else { - putMsg(result, Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR, name); + putMsg(result, 
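
The two private helpers above make the alert-plugin-instance parameters a round trip: on create/update only the user-entered key/value pairs are persisted, and on query those values are merged back into the plugin's registered UI parameter definition. The sketch below puts both directions side by side, with types simplified to string maps and the class name invented for illustration. Storing only the values keeps the row small and lets the UI definition evolve with the plugin, which is presumably the intent behind the split.

```java
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
import org.apache.dolphinscheduler.spi.params.base.PluginParams;

import java.util.List;
import java.util.Map;

public class PluginInstanceParamsRoundTrip {

    /** create/update path: keep only the field -> value pairs the user actually filled in. */
    public static String toStoredValueMap(String uiFormJson) {
        Map<String, String> valueMap = PluginParamsTransfer.getPluginParamsMap(uiFormJson);
        return JSONUtils.toJsonString(valueMap);
    }

    /** query path: merge the stored values back into the plugin's registered UI definition. */
    public static String toUiForm(String storedValueMapJson, String pluginUiParamsJson) {
        Map<String, String> valueMap = JSONUtils.toMap(storedValueMapJson);
        List<PluginParams> uiParams = JSONUtils.toList(pluginUiParamsJson, PluginParams.class);
        uiParams.forEach(param -> param.setValue(valueMap.get(param.getName())));
        return JSONUtils.toJsonString(uiParams);
    }
}
```
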
Status.PROCESS_DEFINITION_NAME_EXIST, name); } return result; } @@ -1325,7 +1327,7 @@ public class ProcessDefinitionServiceImpl extends BaseService implements List processInstanceList = processInstanceService.queryByProcessDefineId(processId, limit); for (ProcessInstance processInstance : processInstanceList) { - processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime())); + processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime())); } if (limit > processInstanceList.size()) { diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SessionServiceImpl.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SessionServiceImpl.java index 8aaefdadff..917ebdf3a8 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SessionServiceImpl.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SessionServiceImpl.java @@ -38,6 +38,7 @@ import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; +import org.springframework.web.util.WebUtils; /** * session service implement @@ -60,7 +61,7 @@ public class SessionServiceImpl extends BaseService implements SessionService { String sessionId = request.getHeader(Constants.SESSION_ID); if (StringUtils.isBlank(sessionId)) { - Cookie cookie = getCookie(request, Constants.SESSION_ID); + Cookie cookie = WebUtils.getCookie(request, Constants.SESSION_ID); if (cookie != null) { sessionId = cookie.getValue(); diff --git a/dolphinscheduler-api/src/main/resources/i18n/messages.properties b/dolphinscheduler-api/src/main/resources/i18n/messages.properties index cae8e5fa59..62d3615664 100644 --- a/dolphinscheduler-api/src/main/resources/i18n/messages.properties +++ b/dolphinscheduler-api/src/main/resources/i18n/messages.properties @@ -125,6 +125,10 @@ TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} +DATA_SOURCE_PRINCIPAL=principal +DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf +DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username +DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description diff --git a/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties b/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties index 18b7a10885..74fc4d01b0 100644 --- a/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties +++ b/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties @@ -125,6 +125,10 @@ TENANT_CODE=os tenant code QUEUE_NAME=queue name PASSWORD=password DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...} +DATA_SOURCE_PRINCIPAL=principal +DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf +DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username +DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path PROJECT_TAG=project related operation 
CREATE_PROJECT_NOTES=create project PROJECT_DESC=project description diff --git a/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties b/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties index a333ef1fa5..f1e8ebefc3 100644 --- a/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties +++ b/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties @@ -119,6 +119,10 @@ TENANT_CODE=操作系统租户 QUEUE_NAME=队列名 PASSWORD=密码 DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...} +DATA_SOURCE_PRINCIPAL=principal +DATA_SOURCE_KERBEROS_KRB5_CONF=kerberos认证参数 java.security.krb5.conf +DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=kerberos认证参数 login.user.keytab.username +DATA_SOURCE_KERBEROS_KEYTAB_PATH=kerberos认证参数 login.user.keytab.path PROJECT_TAG=项目相关操作 CREATE_PROJECT_NOTES=创建项目 PROJECT_DESC=项目描述 diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java index 9719448ad8..e44fad8151 100644 --- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java @@ -117,14 +117,14 @@ public class ProcessDefinitionControllerTest { public void testVerifyProcessDefinitionName() throws Exception { Map result = new HashMap<>(); - putMsg(result, Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR); + putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST); String projectName = "test"; String name = "dag_test"; Mockito.when(processDefinitionService.verifyProcessDefinitionName(user, projectName, name)).thenReturn(result); Result response = processDefinitionController.verifyProcessDefinitionName(user, projectName, name); - Assert.assertEquals(Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR.getCode(), response.getCode().intValue()); + Assert.assertEquals(Status.PROCESS_DEFINITION_NAME_EXIST.getCode(), response.getCode().intValue()); } diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/AlertPluginInstanceServiceTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/AlertPluginInstanceServiceTest.java new file mode 100644 index 0000000000..fb58070304 --- /dev/null +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/AlertPluginInstanceServiceTest.java @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.dolphinscheduler.api.service; + +import org.apache.dolphinscheduler.api.enums.Status; +import org.apache.dolphinscheduler.api.service.impl.AlertPluginInstanceServiceImpl; +import org.apache.dolphinscheduler.common.Constants; +import org.apache.dolphinscheduler.common.enums.UserType; +import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance; +import org.apache.dolphinscheduler.dao.entity.PluginDefine; +import org.apache.dolphinscheduler.dao.entity.User; +import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; +import org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper; +import org.apache.dolphinscheduler.dao.mapper.PluginDefineMapper; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class AlertPluginInstanceServiceTest { + + @InjectMocks + AlertPluginInstanceServiceImpl alertPluginInstanceService; + + @Mock + private AlertPluginInstanceMapper alertPluginInstanceMapper; + + @Mock + private PluginDefineMapper pluginDefineMapper; + + @Mock + private AlertGroupMapper alertGroupMapper; + + private List alertPluginInstances; + + private User user; + + private String uiParams = "[\n" + + " {\n" + + " \"field\":\"userParams\",\n" + + " \"name\":\"user.params\",\n" + + " \"props\":{\n" + + " \"placeholder\":\"please enter your custom parameters, which will be passed to you when calling your script\",\n" + + " \"size\":\"small\"\n" + + " },\n" + + " \"type\":\"input\",\n" + + " \"title\":\"user.params\",\n" + + " \"value\":\"userParams\",\n" + + " \"validate\":[\n" + + " {\n" + + " \"required\":false,\n" + + " \"message\":null,\n" + + " \"type\":\"string\",\n" + + " \"trigger\":\"blur\",\n" + + " \"min\":null,\n" + + " \"max\":null\n" + + " }\n" + + " ]\n" + + " },\n" + + " {\n" + + " \"field\":\"path\",\n" + + " \"name\":\"path\",\n" + + " \"props\":{\n" + + " \"placeholder\":\"please upload the file to the disk directory of the alert server, and ensure that the path is absolute and has the corresponding access rights\",\n" + + " \"size\":\"small\"\n" + + " },\n" + + " \"type\":\"input\",\n" + + " \"title\":\"path\",\n" + + " \"value\":\"/kris/script/path\",\n" + + " \"validate\":[\n" + + " {\n" + + " \"required\":true,\n" + + " \"message\":null,\n" + + " \"type\":\"string\",\n" + + " \"trigger\":\"blur\",\n" + + " \"min\":null,\n" + + " \"max\":null\n" + + " }\n" + + " ]\n" + + " },\n" + + " {\n" + + " \"field\":\"type\",\n" + + " \"name\":\"type\",\n" + + " \"props\":{\n" + + " \"placeholder\":null,\n" + + " \"size\":\"small\"\n" + + " },\n" + + " \"type\":\"radio\",\n" + + " \"title\":\"type\",\n" + + " \"value\":0,\n" + + " \"validate\":[\n" + + " {\n" + + " \"required\":true,\n" + + " \"message\":null,\n" + + " \"type\":\"string\",\n" + + " \"trigger\":\"blur\",\n" + + " \"min\":null,\n" + + " \"max\":null\n" + + " }\n" + + " ],\n" + + " \"options\":[\n" + + " {\n" + + " \"label\":\"SHELL\",\n" + + " \"value\":0,\n" + + " \"disabled\":false\n" + + " }\n" + + " ]\n" + + " }\n" + + "]\n" + + "\n"; + + private String paramsMap = "{\"path\":\"/kris/script/path\",\"userParams\":\"userParams\",\"type\":\"0\"}"; + + @Before + public void before() { + user = new User(); + 
user.setUserType(UserType.ADMIN_USER); + user.setId(1); + AlertPluginInstance alertPluginInstance = new AlertPluginInstance(); + alertPluginInstance.setPluginInstanceParams("test1"); + alertPluginInstance.setPluginDefineId(1); + alertPluginInstance.setId(1); + alertPluginInstance.setPluginInstanceParams("test"); + alertPluginInstances = new ArrayList<>(); + alertPluginInstances.add(alertPluginInstance); + } + + @Test + public void testCreate() { + Mockito.when(alertPluginInstanceMapper.queryByInstanceName("test")).thenReturn(alertPluginInstances); + Map result = alertPluginInstanceService.create(user, 1, "test", uiParams); + Assert.assertEquals(Status.PLUGIN_INSTANCE_ALREADY_EXIT, result.get(Constants.STATUS)); + Mockito.when(alertPluginInstanceMapper.insert(Mockito.any())).thenReturn(1); + result = alertPluginInstanceService.create(user, 1, "test1", uiParams); + Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); + } + + @Test + public void testDelete() { + List ids = Arrays.asList("11,2,3", null, "98,1"); + Mockito.when(alertGroupMapper.queryInstanceIdsList()).thenReturn(ids); + Map result = alertPluginInstanceService.delete(user, 1); + Assert.assertEquals(Status.DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED, result.get(Constants.STATUS)); + Mockito.when(alertPluginInstanceMapper.deleteById(9)).thenReturn(1); + result = alertPluginInstanceService.delete(user, 9); + Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); + + } + + @Test + public void testUpdate() { + Mockito.when(alertPluginInstanceMapper.updateById(Mockito.any())).thenReturn(0); + Map result = alertPluginInstanceService.update(user, 1, "testUpdate", uiParams); + Assert.assertEquals(Status.SAVE_ERROR, result.get(Constants.STATUS)); + Mockito.when(alertPluginInstanceMapper.updateById(Mockito.any())).thenReturn(1); + result = alertPluginInstanceService.update(user, 1, "testUpdate", uiParams); + Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); + } + + @Test + public void testQueryAll() { + AlertPluginInstance alertPluginInstance = new AlertPluginInstance(); + alertPluginInstance.setId(1); + alertPluginInstance.setPluginDefineId(1); + alertPluginInstance.setPluginInstanceParams(paramsMap); + alertPluginInstance.setInstanceName("test"); + PluginDefine pluginDefine = new PluginDefine("script", "script", uiParams); + pluginDefine.setId(1); + List pluginDefines = Collections.singletonList(pluginDefine); + List pluginInstanceList = Collections.singletonList(alertPluginInstance); + Mockito.when(alertPluginInstanceMapper.queryAllAlertPluginInstanceList()).thenReturn(pluginInstanceList); + Mockito.when(pluginDefineMapper.queryAllPluginDefineList()).thenReturn(pluginDefines); + Map result = alertPluginInstanceService.queryAll(); + Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); + } + +} diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/BaseServiceTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/BaseServiceTest.java index 95083dd51b..968dd470f6 100644 --- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/BaseServiceTest.java +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/BaseServiceTest.java @@ -94,20 +94,6 @@ public class BaseServiceTest { baseService.putMsg(result,Status.PROJECT_NOT_FOUNT,"test"); } @Test - public void testGetCookie(){ - - MockHttpServletRequest request = new MockHttpServletRequest(); - MockCookie 
mockCookie = new MockCookie("userId","1"); - request.setCookies(mockCookie); - //cookie is not null - Cookie cookie = BaseService.getCookie(request,"userId"); - Assert.assertNotNull(cookie); - //cookie is null - cookie = BaseService.getCookie(request,"userName"); - Assert.assertNull(cookie); - - } - @Test public void testCreateTenantDirIfNotExists(){ PowerMockito.mockStatic(HadoopUtils.class); diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java index 84ccd2e0a3..13eb1b9c2e 100644 --- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataSourceServiceTest.java @@ -23,6 +23,7 @@ import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.DbConnectType; import org.apache.dolphinscheduler.common.enums.DbType; import org.apache.dolphinscheduler.common.enums.UserType; +import org.apache.dolphinscheduler.common.utils.CommonUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.dao.datasource.BaseDataSource; @@ -51,7 +52,7 @@ import org.powermock.modules.junit4.PowerMockRunner; @RunWith(PowerMockRunner.class) @PowerMockIgnore({"sun.security.*", "javax.net.*"}) -@PrepareForTest({DataSourceFactory.class}) +@PrepareForTest({DataSourceFactory.class, CommonUtils.class}) public class DataSourceServiceTest { @@ -68,7 +69,7 @@ public class DataSourceServiceTest { String dataSourceName = "dataSource01"; String dataSourceDesc = "test dataSource"; DbType dataSourceType = DbType.POSTGRESQL; - String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null); + String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null, null, null, null); // data source exits List dataSourceList = new ArrayList<>(); @@ -110,7 +111,7 @@ public class DataSourceServiceTest { String dataSourceName = "dataSource01"; String dataSourceDesc = "test dataSource"; DbType dataSourceType = DbType.POSTGRESQL; - String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null); + String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null, null, null, null); // data source not exits PowerMockito.when(dataSourceMapper.selectById(dataSourceId)).thenReturn(null); @@ -274,24 +275,35 @@ public class DataSourceServiceTest { @Test public void buildParameter() { String param = dataSourceService.buildParameter(DbType.ORACLE, "192.168.9.1", "1521", "im" - , "", "test", "test", DbConnectType.ORACLE_SERVICE_NAME, ""); + , "", "test", "test", DbConnectType.ORACLE_SERVICE_NAME, "", "", "",""); String expected = "{\"connectType\":\"ORACLE_SERVICE_NAME\",\"type\":\"ORACLE_SERVICE_NAME\",\"address\":\"jdbc:oracle:thin:@//192.168.9.1:1521\",\"database\":\"im\"," + "\"jdbcUrl\":\"jdbc:oracle:thin:@//192.168.9.1:1521/im\",\"user\":\"test\",\"password\":\"test\"}"; Assert.assertEquals(expected, param); + + PowerMockito.mockStatic(CommonUtils.class); + 
PowerMockito.when(CommonUtils.getKerberosStartupState()).thenReturn(true); + PowerMockito.when(CommonUtils.encodePassword(Mockito.anyString())).thenReturn("test"); + param = dataSourceService.buildParameter(DbType.HIVE, "192.168.9.1", "10000", "im" + , "hive/hdfs-mycluster@ESZ.COM", "test", "test", null, "", "/opt/krb5.conf", "test2/hdfs-mycluster@ESZ.COM", "/opt/hdfs.headless.keytab"); + expected = "{\"type\":null,\"address\":\"jdbc:hive2://192.168.9.1:10000\",\"database\":\"im\",\"jdbcUrl\":\"jdbc:hive2://192.168.9.1:10000/im;principal=hive/hdfs-mycluster@ESZ.COM\"," + + "\"user\":\"test\",\"password\":\"test\",\"principal\":\"hive/hdfs-mycluster@ESZ.COM\",\"javaSecurityKrb5Conf\":\"/opt/krb5.conf\"," + + "\"loginUserKeytabUsername\":\"test2/hdfs-mycluster@ESZ.COM\",\"loginUserKeytabPath\":\"/opt/hdfs.headless.keytab\"}"; + Assert.assertEquals(expected, param); + } @Test public void buildParameterWithDecodePassword() { PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "true"); String param = dataSourceService.buildParameter(DbType.MYSQL, "192.168.9.1", "1521", "im" - , "", "test", "123456", null, ""); + , "", "test", "123456", null, "", "", "", ""); String expected = "{\"type\":null,\"address\":\"jdbc:mysql://192.168.9.1:1521\",\"database\":\"im\",\"jdbcUrl\":\"jdbc:mysql://192.168.9.1:1521/im\"," + "\"user\":\"test\",\"password\":\"IUAjJCVeJipNVEl6TkRVMg==\"}"; Assert.assertEquals(expected, param); PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "false"); param = dataSourceService.buildParameter(DbType.MYSQL, "192.168.9.1", "1521", "im" - , "", "test", "123456", null, ""); + , "", "test", "123456", null, "", "", "", ""); expected = "{\"type\":null,\"address\":\"jdbc:mysql://192.168.9.1:1521\",\"database\":\"im\",\"jdbcUrl\":\"jdbc:mysql://192.168.9.1:1521/im\",\"user\":\"test\",\"password\":\"123456\"}"; Assert.assertEquals(expected, param); } @@ -316,7 +328,7 @@ public class DataSourceServiceTest { @Test public void testCheckConnection() throws Exception { DbType dataSourceType = DbType.POSTGRESQL; - String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null); + String parameter = dataSourceService.buildParameter(dataSourceType, "172.16.133.200", "5432", "dolphinscheduler", null, "postgres", "", null, null, null, null, null); PowerMockito.mockStatic(DataSourceFactory.class); PowerMockito.when(DataSourceFactory.getDatasource(Mockito.any(), Mockito.anyString())).thenReturn(null); diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java index 4db533838c..7ebb40ecb0 100644 --- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java @@ -657,7 +657,7 @@ public class ProcessDefinitionServiceTest { Mockito.when(processDefineMapper.verifyByDefineName(project.getId(), "test_pdf")).thenReturn(getProcessDefinition()); Map processExistRes = processDefinitionService.verifyProcessDefinitionName(loginUser, "project_test1", "test_pdf"); - Assert.assertEquals(Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR, processExistRes.get(Constants.STATUS)); + Assert.assertEquals(Status.PROCESS_DEFINITION_NAME_EXIST, processExistRes.get(Constants.STATUS)); } 
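Note on the Kerberos-aware datasource parameters exercised above: `buildParameter` now accepts `javaSecurityKrb5Conf`, `loginUserKeytabUsername` and `loginUserKeytabPath`, and for Hive the principal is appended to the JDBC URL. The following standalone sketch (plain Jackson, not DolphinScheduler code; the values are copied from the test expectations above) illustrates the resulting parameter JSON shape:

```java
// Standalone illustration only: mirrors the expected Hive/Kerberos parameter JSON
// asserted in DataSourceServiceTest above. Class name is hypothetical.
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.LinkedHashMap;
import java.util.Map;

public class HiveKerberosParamSketch {

    public static void main(String[] args) throws Exception {
        String host = "192.168.9.1";
        String port = "10000";
        String database = "im";
        String principal = "hive/hdfs-mycluster@ESZ.COM";

        Map<String, Object> params = new LinkedHashMap<>();
        params.put("type", null);
        params.put("address", "jdbc:hive2://" + host + ":" + port);
        params.put("database", database);
        // when Kerberos is enabled, the principal is appended to the JDBC URL
        params.put("jdbcUrl", "jdbc:hive2://" + host + ":" + port + "/" + database + ";principal=" + principal);
        params.put("user", "test");
        params.put("password", "test");
        // the three new Kerberos fields carried by BaseDataSource
        params.put("principal", principal);
        params.put("javaSecurityKrb5Conf", "/opt/krb5.conf");
        params.put("loginUserKeytabUsername", "test2/hdfs-mycluster@ESZ.COM");
        params.put("loginUserKeytabPath", "/opt/hdfs.headless.keytab");

        System.out.println(new ObjectMapper().writeValueAsString(params));
    }
}
```

Later in this patch, `HiveDataSource` and `SparkDataSource` pass these same three fields to the new `CommonUtils.loadKerberosConf(krb5Conf, keytabUsername, keytabPath)` overload before opening a connection.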
@Test diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java index f0c7c40c49..45af3b2700 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java @@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.common; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.OSUtils; +import org.apache.dolphinscheduler.common.utils.StringUtils; import java.util.regex.Pattern; @@ -146,7 +147,7 @@ public final class Constants { */ public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs"; - public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties"; + public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js"; /** * development.state @@ -956,7 +957,9 @@ public final class Constants { public static final String PRINCIPAL = "principal"; public static final String OTHER = "other"; public static final String ORACLE_DB_CONNECT_TYPE = "connectType"; - + public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf"; + public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername"; + public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath"; /** * session timeout @@ -1040,4 +1043,11 @@ public final class Constants { * pstree, get pud and sub pid */ public static final String PSTREE = "pstree"; + + /** + * docker & kubernetes + */ + public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER")); + public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT")); + } diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java index 094620aee5..b136ae295d 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java @@ -14,6 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + package org.apache.dolphinscheduler.common.task.conditions; import org.apache.dolphinscheduler.common.enums.DependentRelation; @@ -36,7 +37,6 @@ public class ConditionsParameters extends AbstractParameters { // node list to run when failed private List failedNode; - @Override public boolean checkParameters() { return true; @@ -78,4 +78,12 @@ public class ConditionsParameters extends AbstractParameters { public void setFailedNode(List failedNode) { this.failedNode = failedNode; } + + public String getConditionResult() { + return "{" + + "\"successNode\": [\"" + successNode.get(0) + + "\"],\"failedNode\": [\"" + failedNode.get(0) + + "\"]}"; + } + } diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java index 45c5aa2c93..cf307b402f 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java @@ -86,13 +86,26 @@ public class CommonUtils { * @throws Exception errors */ public static void loadKerberosConf() throws Exception { + loadKerberosConf(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH), + PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME), + PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH)); + } + + /** + * load kerberos configuration + * @param javaSecurityKrb5Conf javaSecurityKrb5Conf + * @param loginUserKeytabUsername loginUserKeytabUsername + * @param loginUserKeytabPath loginUserKeytabPath + * @throws Exception errors + */ + public static void loadKerberosConf(String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) throws Exception { if (CommonUtils.getKerberosStartupState()) { - System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH)); + System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, StringUtils.defaultIfBlank(javaSecurityKrb5Conf, PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH))); Configuration configuration = new Configuration(); configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, Constants.KERBEROS); UserGroupInformation.setConfiguration(configuration); - UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME), - PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH)); + UserGroupInformation.loginUserFromKeytab(StringUtils.defaultIfBlank(loginUserKeytabUsername, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)), + StringUtils.defaultIfBlank(loginUserKeytabPath, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH))); } } diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java index 80c0ed411c..a5312995ac 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java @@ -241,15 +241,50 @@ public class DateUtils { */ public static String format2Readable(long ms) { - long days = ms / (1000 * 60 * 60 * 24); - long hours = (ms % (1000 * 60 * 60 * 24)) / (1000 * 60 * 60); - long minutes = (ms % (1000 * 60 * 60)) / (1000 * 60); - long seconds = (ms % (1000 * 60)) / 
1000; + long days = MILLISECONDS.toDays(ms); + long hours = MILLISECONDS.toDurationHours(ms); + long minutes = MILLISECONDS.toDurationMinutes(ms); + long seconds = MILLISECONDS.toDurationSeconds(ms); return String.format("%02d %02d:%02d:%02d", days, hours, minutes, seconds); } + /** + * + * format time to duration + * + * @param d1 d1 + * @param d2 d2 + * @return format time + */ + public static String format2Duration(Date d1, Date d2) { + return format2Duration(differMs(d1, d2)); + } + + /** + * format time to duration + * + * @param ms ms + * @return format time + */ + public static String format2Duration(long ms) { + + long days = MILLISECONDS.toDays(ms); + long hours = MILLISECONDS.toDurationHours(ms); + long minutes = MILLISECONDS.toDurationMinutes(ms); + long seconds = MILLISECONDS.toDurationSeconds(ms); + + StringBuilder strBuilder = new StringBuilder(); + strBuilder = days > 0 ? strBuilder.append(days).append("d").append(" ") : strBuilder; + strBuilder = hours > 0 ? strBuilder.append(hours).append("h").append(" ") : strBuilder; + strBuilder = minutes > 0 ? strBuilder.append(minutes).append("m").append(" ") : strBuilder; + strBuilder = seconds > 0 ? strBuilder.append(seconds).append("s") : strBuilder; + + return strBuilder.toString(); + + } + /** * get monday *

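As a quick illustration of the `format2Duration` behaviour added in the hunk above (zero-valued units are skipped), here is a minimal standalone equivalent using `java.util.concurrent.TimeUnit` instead of the `MILLISECONDS` helper introduced in the next hunk; class and method placement are illustrative only:

```java
import java.util.concurrent.TimeUnit;

public class DurationFormatSketch {

    /** Mirrors the patch's DateUtils.format2Duration: units whose value is zero are skipped. */
    static String format2Duration(long ms) {
        long days = TimeUnit.MILLISECONDS.toDays(ms);
        long hours = TimeUnit.MILLISECONDS.toHours(ms) % 24;
        long minutes = TimeUnit.MILLISECONDS.toMinutes(ms) % 60;
        long seconds = TimeUnit.MILLISECONDS.toSeconds(ms) % 60;

        StringBuilder sb = new StringBuilder();
        if (days > 0) {
            sb.append(days).append("d ");
        }
        if (hours > 0) {
            sb.append(hours).append("h ");
        }
        if (minutes > 0) {
            sb.append(minutes).append("m ");
        }
        if (seconds > 0) {
            sb.append(seconds).append("s");
        }
        // unlike the patch, trim a possible trailing space for readability
        return sb.toString().trim();
    }

    public static void main(String[] args) {
        System.out.println(format2Duration(93_610_000L)); // 1 day, 2 hours, 10 seconds -> "1d 2h 10s"
        System.out.println(format2Duration(10_000L));     // -> "10s"
    }
}
```

The expected outputs match the `DateUtilsTest.testFormat2Duration` cases added later in this patch (e.g. "1d 1h 10m 10s", "1d 10s", "5h 10s").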
@@ -454,4 +489,47 @@ public class DateUtils { return getCurrentTime(Constants.YYYYMMDDHHMMSSSSS); } + static final long C0 = 1L; + static final long C1 = C0 * 1000L; + static final long C2 = C1 * 1000L; + static final long C3 = C2 * 1000L; + static final long C4 = C3 * 60L; + static final long C5 = C4 * 60L; + static final long C6 = C5 * 24L; + + /** + * Time unit representing one thousandth of a second + */ + public static class MILLISECONDS { + + public static long toSeconds(long d) { + return d / (C3 / C2); + } + + public static long toMinutes(long d) { + return d / (C4 / C2); + } + + public static long toHours(long d) { + return d / (C5 / C2); + } + + public static long toDays(long d) { + return d / (C6 / C2); + } + + public static long toDurationSeconds(long d) { + return (d % (C4 / C2)) / (C3 / C2); + } + + public static long toDurationMinutes(long d) { + return (d % (C5 / C2)) / (C4 / C2); + } + + public static long toDurationHours(long d) { + return (d % (C6 / C2)) / (C5 / C2); + } + + } + } diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java index 4ab04997d8..73af57929b 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java @@ -211,25 +211,13 @@ public class JSONUtils { /** * json to map - *

* {@link #toMap(String, Class, Class)} * * @param json json * @return json to map */ public static Map toMap(String json) { - if (StringUtils.isEmpty(json)) { - return null; - } - - try { - return objectMapper.readValue(json, new TypeReference>() { - }); - } catch (Exception e) { - logger.error("json to map exception!", e); - } - - return null; + return parseObject(json, new TypeReference>() {}); } /** @@ -243,13 +231,24 @@ public class JSONUtils { * @return to map */ public static Map toMap(String json, Class classK, Class classV) { + return parseObject(json, new TypeReference>() {}); + } + + /** + * json to object + * + * @param json json string + * @param type type reference + * @param + * @return return parse object + */ + public static T parseObject(String json, TypeReference type) { if (StringUtils.isEmpty(json)) { return null; } try { - return objectMapper.readValue(json, new TypeReference>() { - }); + return objectMapper.readValue(json, type); } catch (Exception e) { logger.error("json to map exception!", e); } diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java index ddb29730b7..6c761f3d00 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java @@ -49,8 +49,6 @@ public class NetUtils { private static final String NETWORK_PRIORITY_INNER = "inner"; private static final String NETWORK_PRIORITY_OUTER = "outer"; private static final Logger logger = LoggerFactory.getLogger(NetUtils.class); - private static final String ANY_HOST_VALUE = "0.0.0.0"; - private static final String LOCAL_HOST_VALUE = "127.0.0.1"; private static InetAddress LOCAL_ADDRESS = null; private static volatile String HOST_ADDRESS; @@ -58,6 +56,22 @@ public class NetUtils { throw new UnsupportedOperationException("Construct NetUtils"); } + /** + * get addr like host:port + * @return addr + */ + public static String getAddr(String host, int port) { + return String.format("%s:%d", host, port); + } + + /** + * get addr like host:port + * @return addr + */ + public static String getAddr(int port) { + return getAddr(getHost(), port); + } + public static String getHost() { if (HOST_ADDRESS != null) { return HOST_ADDRESS; @@ -65,10 +79,10 @@ public class NetUtils { InetAddress address = getLocalAddress(); if (address != null) { - HOST_ADDRESS = address.getHostAddress(); + HOST_ADDRESS = Constants.KUBERNETES_MODE ? address.getHostName() : address.getHostAddress(); return HOST_ADDRESS; } - return LOCAL_HOST_VALUE; + return Constants.KUBERNETES_MODE ? 
"localhost" : "127.0.0.1"; } private static InetAddress getLocalAddress() { @@ -153,8 +167,8 @@ public class NetUtils { String name = address.getHostAddress(); return (name != null && IP_PATTERN.matcher(name).matches() - && !ANY_HOST_VALUE.equals(name) - && !LOCAL_HOST_VALUE.equals(name)); + && !address.isAnyLocalAddress() + && !address.isLoopbackAddress()); } /** diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java index b48f2d30cc..768c0f654a 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java @@ -389,6 +389,16 @@ public class OSUtils { return null; } + /** + * get sudo command + * @param tenantCode tenantCode + * @param command command + * @return result of sudo execute command + */ + public static String getSudoCmd(String tenantCode, String command) { + return StringUtils.isEmpty(tenantCode) ? command : "sudo -u " + tenantCode + " " + command; + } + /** * Execute the corresponding command of Linux or Windows * diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/StringUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/StringUtils.java index 6e32d12df3..6bed928e14 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/StringUtils.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/StringUtils.java @@ -61,4 +61,12 @@ public class StringUtils { public static String trim(String str) { return str == null ? null : str.trim(); } + + public static String defaultIfBlank(String str, String defaultStr) { + return isBlank(str) ? defaultStr : str; + } + + public static boolean equalsIgnoreCase(String str1, String str2) { + return str1 == null ? 
str2 == null : str1.equalsIgnoreCase(str2); + } } diff --git a/dolphinscheduler-common/src/main/resources/common.properties b/dolphinscheduler-common/src/main/resources/common.properties index b8c21c853b..ed9a02739d 100644 --- a/dolphinscheduler-common/src/main/resources/common.properties +++ b/dolphinscheduler-common/src/main/resources/common.properties @@ -37,7 +37,7 @@ login.user.keytab.username=hdfs-mycluster@ESZ.COM login.user.keytab.path=/opt/hdfs.headless.keytab #resource.view.suffixs -#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties +#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js # if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path hdfs.root.user=hdfs @@ -62,7 +62,7 @@ yarn.application.status.address=http://ds1:8088/ws/v1/cluster/apps/%s # job history status url when application number threshold is reached(default 10000,maybe it was set to 1000) yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s -# system env path +# system env path, If you want to set your own path, you need to set this env file to an absolute path #dolphinscheduler.env.path=env/dolphinscheduler_env.sh development.state=false diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/DateUtilsTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/DateUtilsTest.java index fa16446cd8..63f0be5906 100644 --- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/DateUtilsTest.java +++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/DateUtilsTest.java @@ -157,4 +157,43 @@ public class DateUtilsTest { Assert.assertNotNull(timeStamp); } + @Test + public void testFormat2Duration() { + + // days hours minutes seconds + Date d1 = DateUtils.stringToDate("2020-01-20 11:00:00"); + Date d2 = DateUtils.stringToDate("2020-01-21 12:10:10"); + String duration = DateUtils.format2Duration(d2, d1); + Assert.assertEquals("1d 1h 10m 10s", duration); + + // hours minutes seconds + d1 = DateUtils.stringToDate("2020-01-20 11:00:00"); + d2 = DateUtils.stringToDate("2020-01-20 12:10:10"); + duration = DateUtils.format2Duration(d2, d1); + Assert.assertEquals("1h 10m 10s", duration); + + // minutes seconds + d1 = DateUtils.stringToDate("2020-01-20 11:00:00"); + d2 = DateUtils.stringToDate("2020-01-20 11:10:10"); + duration = DateUtils.format2Duration(d2, d1); + Assert.assertEquals("10m 10s", duration); + + // minutes seconds + d1 = DateUtils.stringToDate("2020-01-20 11:10:00"); + d2 = DateUtils.stringToDate("2020-01-20 11:10:10"); + duration = DateUtils.format2Duration(d2, d1); + Assert.assertEquals("10s", duration); + + d1 = DateUtils.stringToDate("2020-01-20 11:10:00"); + d2 = DateUtils.stringToDate("2020-01-21 11:10:10"); + duration = DateUtils.format2Duration(d2, d1); + Assert.assertEquals("1d 10s", duration); + + d1 = DateUtils.stringToDate("2020-01-20 11:10:00"); + d2 = DateUtils.stringToDate("2020-01-20 16:10:10"); + duration = DateUtils.format2Duration(d2, d1); + Assert.assertEquals("5h 10s", duration); + + } + } diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java index 59276295ae..d2461ecfeb 100644 --- 
a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java +++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/NetUtilsTest.java @@ -29,6 +29,13 @@ import static org.mockito.Mockito.when; */ public class NetUtilsTest { + @Test + public void testGetAddr() { + assertEquals(NetUtils.getHost() + ":5678", NetUtils.getAddr(5678)); + assertEquals("127.0.0.1:5678", NetUtils.getAddr("127.0.0.1", 5678)); + assertEquals("localhost:1234", NetUtils.getAddr("localhost", 1234)); + } + @Test public void testGetLocalHost() { assertNotNull(NetUtils.getHost()); @@ -45,9 +52,11 @@ public class NetUtilsTest { assertFalse(NetUtils.isValidV4Address(address)); address = mock(InetAddress.class); when(address.getHostAddress()).thenReturn("0.0.0.0"); + when(address.isAnyLocalAddress()).thenReturn(true); assertFalse(NetUtils.isValidV4Address(address)); address = mock(InetAddress.class); when(address.getHostAddress()).thenReturn("127.0.0.1"); + when(address.isLoopbackAddress()).thenReturn(true); assertFalse(NetUtils.isValidV4Address(address)); address = mock(InetAddress.class); when(address.getHostAddress()).thenReturn("1.2.3.4"); diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java index 8f40bccdc8..83cacb758b 100644 --- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java +++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/OSUtilsTest.java @@ -75,6 +75,13 @@ public class OSUtilsTest { } } + @Test + public void testGetSudoCmd() { + String cmd = "kill -9 1234"; + String sudoCmd = OSUtils.getSudoCmd("test123", cmd); + Assert.assertEquals("sudo -u test123 " + cmd, sudoCmd); + } + @Test public void exeCmd() { if(OSUtils.isMacOS() || !OSUtils.isWindows()){ @@ -92,12 +99,6 @@ public class OSUtilsTest { Assert.assertNotEquals(0, processId); } @Test - public void getHost(){ - String host = NetUtils.getHost(); - Assert.assertNotNull(host); - Assert.assertNotEquals("", host); - } - @Test public void checkResource(){ boolean resource = OSUtils.checkResource(100,0); Assert.assertTrue(resource); diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringUtilsTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringUtilsTest.java index f67e89e7e2..74b96aff02 100644 --- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringUtilsTest.java +++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringUtilsTest.java @@ -77,4 +77,22 @@ public class StringUtilsTest { String result4 = StringUtils.replaceNRTtoUnderline(null); Assert.assertNull(result4); } + + @Test + public void testTrim() { + String trim = StringUtils.trim(null); + Assert.assertNull(trim); + + trim = StringUtils.trim(" test "); + Assert.assertEquals("test", trim); + } + + @Test + public void testDefaultIfBlank() { + String defaultStr = StringUtils.defaultIfBlank("", "defaultStr"); + Assert.assertEquals("defaultStr", defaultStr); + + defaultStr = StringUtils.defaultIfBlank("test", "defaultStr"); + Assert.assertEquals("test", defaultStr); + } } diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/PluginDao.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/PluginDao.java index 
ab82997bc2..93df36aadc 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/PluginDao.java +++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/PluginDao.java @@ -58,22 +58,22 @@ public class PluginDao extends AbstractBaseDao { * * @param pluginDefine new pluginDefine */ - public void addOrUpdatePluginDefine(PluginDefine pluginDefine) { + public int addOrUpdatePluginDefine(PluginDefine pluginDefine) { requireNonNull(pluginDefine, "pluginDefine is null"); requireNonNull(pluginDefine.getPluginName(), "pluginName is null"); requireNonNull(pluginDefine.getPluginType(), "pluginType is null"); List pluginDefineList = pluginDefineMapper.queryByNameAndType(pluginDefine.getPluginName(), pluginDefine.getPluginType()); if (pluginDefineList == null || pluginDefineList.size() == 0) { - pluginDefineMapper.insert(pluginDefine); - } else { - PluginDefine currPluginDefine = pluginDefineList.get(0); - if (!currPluginDefine.getPluginParams().equals(pluginDefine.getPluginParams())) { - currPluginDefine.setUpdateTime(pluginDefine.getUpdateTime()); - currPluginDefine.setPluginParams(pluginDefine.getPluginParams()); - pluginDefineMapper.updateById(currPluginDefine); - } + return pluginDefineMapper.insert(pluginDefine); } + PluginDefine currPluginDefine = pluginDefineList.get(0); + if (!currPluginDefine.getPluginParams().equals(pluginDefine.getPluginParams())) { + currPluginDefine.setUpdateTime(pluginDefine.getUpdateTime()); + currPluginDefine.setPluginParams(pluginDefine.getPluginParams()); + pluginDefineMapper.updateById(currPluginDefine); + } + return currPluginDefine.getId(); } /** diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java index bb3825fbf9..9bbbf32a6f 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java +++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java @@ -64,6 +64,21 @@ public abstract class BaseDataSource { */ private String principal; + /** + * java.security.krb5.conf + */ + private String javaSecurityKrb5Conf; + + /** + * login.user.keytab.username + */ + private String loginUserKeytabUsername; + + /** + * login.user.keytab.path + */ + private String loginUserKeytabPath; + public String getPrincipal() { return principal; } @@ -211,4 +226,27 @@ public abstract class BaseDataSource { this.other = other; } + public String getJavaSecurityKrb5Conf() { + return javaSecurityKrb5Conf; + } + + public void setJavaSecurityKrb5Conf(String javaSecurityKrb5Conf) { + this.javaSecurityKrb5Conf = javaSecurityKrb5Conf; + } + + public String getLoginUserKeytabUsername() { + return loginUserKeytabUsername; + } + + public void setLoginUserKeytabUsername(String loginUserKeytabUsername) { + this.loginUserKeytabUsername = loginUserKeytabUsername; + } + + public String getLoginUserKeytabPath() { + return loginUserKeytabPath; + } + + public void setLoginUserKeytabPath(String loginUserKeytabPath) { + this.loginUserKeytabPath = loginUserKeytabPath; + } } diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java index 3c087e7f31..bcf1cdf3d2 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java +++ 
b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java @@ -96,7 +96,7 @@ public class HiveDataSource extends BaseDataSource { */ @Override public Connection getConnection() throws Exception { - CommonUtils.loadKerberosConf(); + CommonUtils.loadKerberosConf(getJavaSecurityKrb5Conf(), getLoginUserKeytabUsername(), getLoginUserKeytabPath()); return super.getConnection(); } diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java index 207ed43942..df17bb564c 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java +++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java @@ -52,7 +52,7 @@ public class SparkDataSource extends BaseDataSource { */ @Override public Connection getConnection() throws Exception { - CommonUtils.loadKerberosConf(); + CommonUtils.loadKerberosConf(getJavaSecurityKrb5Conf(), getLoginUserKeytabUsername(), getLoginUserKeytabPath()); return super.getConnection(); } } diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/AlertPluginInstance.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/AlertPluginInstance.java index 5993697a56..202c06bf00 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/AlertPluginInstance.java +++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/AlertPluginInstance.java @@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.dao.entity; import java.util.Date; +import com.baomidou.mybatisplus.annotation.FieldStrategy; import com.baomidou.mybatisplus.annotation.IdType; import com.baomidou.mybatisplus.annotation.TableField; import com.baomidou.mybatisplus.annotation.TableId; @@ -39,7 +40,7 @@ public class AlertPluginInstance { /** * plugin_define_id */ - @TableField("plugin_define_id") + @TableField(value = "plugin_define_id", updateStrategy = FieldStrategy.NEVER) private int pluginDefineId; /** diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessInstance.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessInstance.java index f1d43a353b..03e81dc92d 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessInstance.java +++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessInstance.java @@ -202,7 +202,7 @@ public class ProcessInstance { * @return */ @TableField(exist = false) - private Long duration; + private String duration; /** * process instance priority @@ -547,11 +547,11 @@ public class ProcessInstance { this.dependenceScheduleTimes = dependenceScheduleTimes; } - public Long getDuration() { + public String getDuration() { return duration; } - public void setDuration(Long duration) { + public void setDuration(String duration) { this.duration = duration; } diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java index b13ca87e38..ce8d6d58f5 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java +++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java @@ -14,6 
+14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.dolphinscheduler.dao.entity; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; @@ -170,7 +171,7 @@ public class TaskInstance implements Serializable { * duration */ @TableField(exist = false) - private Long duration; + private String duration; /** * max retry times @@ -437,11 +438,11 @@ public class TaskInstance implements Serializable { this.processInstanceName = processInstanceName; } - public Long getDuration() { + public String getDuration() { return duration; } - public void setDuration(Long duration) { + public void setDuration(String duration) { this.duration = duration; } @@ -505,7 +506,6 @@ public class TaskInstance implements Serializable { return TaskType.CONDITIONS.equals(TaskType.valueOf(this.taskType)); } - /** * determine if you can try again * diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.java index b206a787b6..ebfa5b42ec 100644 --- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.java +++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.java @@ -61,6 +61,12 @@ public interface AlertGroupMapper extends BaseMapper { */ List queryAllGroupList(); + /** + * query instance ids All + * @return list + */ + List queryInstanceIdsList(); + /** * queryAlertGroupInstanceIdsById * @param alertGroupId diff --git a/dolphinscheduler-dao/src/main/resources/datasource.properties b/dolphinscheduler-dao/src/main/resources/datasource.properties index fa07c6a6d3..535b7493ce 100644 --- a/dolphinscheduler-dao/src/main/resources/datasource.properties +++ b/dolphinscheduler-dao/src/main/resources/datasource.properties @@ -14,12 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - -# mysql -#spring.datasource.driver-class-name=com.mysql.jdbc.Driver -#spring.datasource.url=jdbc:mysql://localhost:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8 -#spring.datasource.username=root -#spring.datasource.password=123456 # postgresql spring.datasource.driver-class-name=org.postgresql.Driver @@ -33,7 +27,6 @@ spring.datasource.password=test #spring.datasource.username=xxxx #spring.datasource.password=xxxx - # connection configuration #spring.datasource.initialSize=5 # min connection number diff --git a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.xml b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.xml index e60dedfd6c..90fd8a72a7 100644 --- a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.xml +++ b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/AlertGroupMapper.xml @@ -52,6 +52,13 @@ order by update_time desc + +
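Referring back to the `JSONUtils` change earlier in this patch: both `toMap` overloads now delegate to a single generic `parseObject(String, TypeReference<T>)` method. A minimal, self-contained sketch of that pattern with plain Jackson follows; the class name and the sample payload are hypothetical:

```java
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Map;

public class GenericParseSketch {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    /** Single generic entry point; returns null on blank input or on parse failure. */
    static <T> T parseObject(String json, TypeReference<T> type) {
        if (json == null || json.trim().isEmpty()) {
            return null;
        }
        try {
            return MAPPER.readValue(json, type);
        } catch (Exception e) {
            // the real JSONUtils logs the exception here rather than swallowing it silently
            return null;
        }
    }

    /** The untyped toMap overload simply delegates to the generic helper. */
    static Map<String, String> toMap(String json) {
        return parseObject(json, new TypeReference<Map<String, String>>() { });
    }

    public static void main(String[] args) {
        // e.g. an alert plugin instance parameter blob
        System.out.println(toMap("{\"path\":\"/tmp/script.sh\",\"type\":\"0\"}"));
    }
}
```

Collapsing both overloads onto one generic parse method keeps the blank-check and error handling in a single place, which is the motivation for the refactor shown in the diff.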