Fix jdbc registry cannot work (#15861)

Branch: 3.2.2-release-bak
Wenjun Ruan authored 7 months ago, committed by GitHub
Commit: 163f8f01f0
47 changed files (changed lines in parentheses):

  1. .github/workflows/backend.yml (12)
  2. .github/workflows/cluster-test/mysql_with_mysql_registry/Dockerfile (6)
  3. .github/workflows/cluster-test/mysql_with_mysql_registry/deploy.sh (44)
  4. .github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-base.yaml (34)
  5. .github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-cluster.yaml (0)
  6. .github/workflows/cluster-test/mysql_with_mysql_registry/dolphinscheduler_env.sh (58)
  7. .github/workflows/cluster-test/mysql_with_mysql_registry/install_env.sh (58)
  8. .github/workflows/cluster-test/mysql_with_mysql_registry/running_test.sh (0)
  9. .github/workflows/cluster-test/mysql_with_mysql_registry/start-job.sh (8)
  10. .github/workflows/cluster-test/mysql_with_zookeeper_registry/Dockerfile (48)
  11. .github/workflows/cluster-test/mysql_with_zookeeper_registry/deploy.sh (0)
  12. .github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-base.yaml (0)
  13. .github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-cluster.yaml (29)
  14. .github/workflows/cluster-test/mysql_with_zookeeper_registry/dolphinscheduler_env.sh (0)
  15. .github/workflows/cluster-test/mysql_with_zookeeper_registry/install_env.sh (0)
  16. .github/workflows/cluster-test/mysql_with_zookeeper_registry/running_test.sh (108)
  17. .github/workflows/cluster-test/mysql_with_zookeeper_registry/start-job.sh (33)
  18. .github/workflows/cluster-test/postgresql_with_postgresql_registry/Dockerfile (39)
  19. .github/workflows/cluster-test/postgresql_with_postgresql_registry/deploy.sh (41)
  20. .github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-base.yaml (35)
  21. .github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-cluster.yaml (0)
  22. .github/workflows/cluster-test/postgresql_with_postgresql_registry/dolphinscheduler_env.sh (58)
  23. .github/workflows/cluster-test/postgresql_with_postgresql_registry/install_env.sh (58)
  24. .github/workflows/cluster-test/postgresql_with_postgresql_registry/running_test.sh (0)
  25. .github/workflows/cluster-test/postgresql_with_postgresql_registry/start-job.sh (33)
  26. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/Dockerfile (6)
  27. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/deploy.sh (0)
  28. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-base.yaml (0)
  29. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-cluster.yaml (29)
  30. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/dolphinscheduler_env.sh (0)
  31. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/install_env.sh (0)
  32. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/running_test.sh (109)
  33. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/start-job.sh (8)
  34. dolphinscheduler-alert/dolphinscheduler-alert-server/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java (19)
  35. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/ApiApplicationServer.java (6)
  36. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/PluginDao.java (4)
  37. dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/repository/impl/AlertDaoTest.java (4)
  38. dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java (6)
  39. dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/README.md (10)
  40. dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/pom.xml (10)
  41. dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/src/main/java/org/apache/dolphinscheduler/plugin/registry/jdbc/JdbcRegistryAutoConfiguration.java (30)
  42. dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/src/main/resources/META-INF/spring.factories (19)
  43. dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/src/main/resources/mysql_registry_init.sql (1)
  44. dolphinscheduler-tools/src/main/bin/initialize-jdbc-registry.sh (31)
  45. dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/command/CommandApplication.java (152)
  46. dolphinscheduler-tools/src/main/resources/application.yaml (2)
  47. dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java (6)

.github/workflows/backend.yml (12)

@@ -106,10 +106,14 @@ jobs:
strategy:
matrix:
case:
- name: cluster-test-mysql
script: .github/workflows/cluster-test/mysql/start-job.sh
- name: cluster-test-postgresql
script: .github/workflows/cluster-test/postgresql/start-job.sh
- name: cluster-test-mysql-with-zookeeper-registry
script: .github/workflows/cluster-test/mysql_with_zookeeper_registry/start-job.sh
- name: cluster-test-mysql-with-mysql-registry
script: .github/workflows/cluster-test/mysql_with_mysql_registry/start-job.sh
- name: cluster-test-postgresql-zookeeper-registry
script: .github/workflows/cluster-test/postgresql_with_zookeeper_registry/start-job.sh
- name: cluster-test-postgresql-with-postgresql-registry
script: .github/workflows/cluster-test/postgresql_with_postgresql_registry/start-job.sh
steps:
- uses: actions/checkout@v2
with:

.github/workflows/cluster-test/mysql/Dockerfile → .github/workflows/cluster-test/mysql_with_mysql_registry/Dockerfile (6)

@@ -28,10 +28,10 @@ RUN mv /root/apache-dolphinscheduler-*-SNAPSHOT-bin /root/apache-dolphinschedule
ENV DOLPHINSCHEDULER_HOME /root/apache-dolphinscheduler-test-SNAPSHOT-bin
#Setting install.sh
COPY .github/workflows/cluster-test/mysql/install_env.sh $DOLPHINSCHEDULER_HOME/bin/env/install_env.sh
COPY .github/workflows/cluster-test/mysql_with_mysql_registry/install_env.sh $DOLPHINSCHEDULER_HOME/bin/env/install_env.sh
#Setting dolphinscheduler_env.sh
COPY .github/workflows/cluster-test/mysql/dolphinscheduler_env.sh $DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh
COPY .github/workflows/cluster-test/mysql_with_mysql_registry/dolphinscheduler_env.sh $DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh
#Download mysql jar
ENV MYSQL_URL "https://repo.maven.apache.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar"
@@ -43,6 +43,6 @@ cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME
cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/tools/libs/$MYSQL_DRIVER
#Deploy
COPY .github/workflows/cluster-test/mysql/deploy.sh /root/deploy.sh
COPY .github/workflows/cluster-test/mysql_with_mysql_registry/deploy.sh /root/deploy.sh
CMD [ "/bin/bash", "/root/deploy.sh" ]

.github/workflows/cluster-test/mysql_with_mysql_registry/deploy.sh (44)

@@ -0,0 +1,44 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -euox pipefail
USER=root
#Create database
mysql -hmysql -P3306 -uroot -p123456 -e "CREATE DATABASE IF NOT EXISTS dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;"
#Sudo
sed -i '$a'$USER' ALL=(ALL) NOPASSWD: NOPASSWD: ALL' /etc/sudoers
sed -i 's/Defaults requirett/#Defaults requirett/g' /etc/sudoers
#SSH
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
service ssh start
#Init schema
/bin/bash $DOLPHINSCHEDULER_HOME/tools/bin/upgrade-schema.sh
/bin/bash $DOLPHINSCHEDULER_HOME/tools/bin/initialize-jdbc-registry.sh
#Start Cluster
/bin/bash $DOLPHINSCHEDULER_HOME/bin/start-all.sh
#Keep running
tail -f /dev/null

.github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-base.yaml (34)

@@ -0,0 +1,34 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version: "3"
services:
mysql:
container_name: mysql
image: mysql:5.7.36
command: --default-authentication-plugin=mysql_native_password
restart: always
environment:
MYSQL_ROOT_PASSWORD: 123456
ports:
- "3306:3306"
healthcheck:
test: mysqladmin ping -h 127.0.0.1 -u root --password=$$MYSQL_ROOT_PASSWORD
interval: 5s
timeout: 60s
retries: 120

.github/workflows/cluster-test/mysql/docker-compose-cluster.yaml → .github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-cluster.yaml (0)

.github/workflows/cluster-test/mysql_with_mysql_registry/dolphinscheduler_env.sh (58)

@@ -0,0 +1,58 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# JAVA_HOME, will use it to start DolphinScheduler server
export JAVA_HOME=${JAVA_HOME:-/opt/java/openjdk}
# Database related configuration, set database type, username and password
export DATABASE=${DATABASE:-mysql}
export SPRING_PROFILES_ACTIVE=${DATABASE}
export SPRING_DATASOURCE_URL="jdbc:mysql://mysql:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&useSSL=false"
export SPRING_DATASOURCE_USERNAME=root
export SPRING_DATASOURCE_PASSWORD=123456
# DolphinScheduler server related configuration
export SPRING_CACHE_TYPE=${SPRING_CACHE_TYPE:-none}
export SPRING_JACKSON_TIME_ZONE=${SPRING_JACKSON_TIME_ZONE:-UTC}
export MASTER_FETCH_COMMAND_NUM=${MASTER_FETCH_COMMAND_NUM:-10}
# Registry center configuration, determines the type and link of the registry center
export REGISTRY_TYPE=${REGISTRY_TYPE:-jdbc}
export REGISTRY_HIKARI_CONFIG_JDBC_URL="jdbc:mysql://mysql:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&useSSL=false"
export REGISTRY_HIKARI_CONFIG_USERNAME=root
export REGISTRY_HIKARI_CONFIG_PASSWORD=123456
# Tasks related configurations, need to change the configuration if you use the related tasks.
export HADOOP_HOME=${HADOOP_HOME:-/opt/soft/hadoop}
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/opt/soft/hadoop/etc/hadoop}
export SPARK_HOME=${SPARK_HOME:-/opt/soft/spark}
export PYTHON_LAUNCHER=${PYTHON_LAUNCHER:-/opt/soft/python/bin/python3}
export HIVE_HOME=${HIVE_HOME:-/opt/soft/hive}
export FLINK_HOME=${FLINK_HOME:-/opt/soft/flink}
export DATAX_LAUNCHER=${DATAX_LAUNCHER:-/opt/soft/datax/bin/datax.py}
export PATH=$HADOOP_HOME/bin:$SPARK_HOME/bin:$PYTHON_LAUNCHER:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$DATAX_LAUNCHER:$PATH
export MASTER_RESERVED_MEMORY=0.01
export WORKER_RESERVED_MEMORY=0.01
# applicationId auto collection related configuration, the following configurations are unnecessary if setting appId.collect=log
#export HADOOP_CLASSPATH=`hadoop classpath`:${DOLPHINSCHEDULER_HOME}/tools/libs/*
#export SPARK_DIST_CLASSPATH=$HADOOP_CLASSPATH:$SPARK_DIST_CLASS_PATH
#export HADOOP_CLIENT_OPTS="-javaagent:${DOLPHINSCHEDULER_HOME}/tools/libs/aspectjweaver-1.9.7.jar":$HADOOP_CLIENT_OPTS
#export SPARK_SUBMIT_OPTS="-javaagent:${DOLPHINSCHEDULER_HOME}/tools/libs/aspectjweaver-1.9.7.jar":$SPARK_SUBMIT_OPTS
#export FLINK_ENV_JAVA_OPTS="-javaagent:${DOLPHINSCHEDULER_HOME}/tools/libs/aspectjweaver-1.9.7.jar":$FLINK_ENV_JAVA_OPTS
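For orientation: under Spring Boot's relaxed binding, the REGISTRY_* variables exported above presumably resolve to the registry properties sketched below. The property names are inferred from the variable names and are not part of this diff.

    # Assumed YAML equivalent of the exported REGISTRY_* variables (a sketch, not repository content)
    registry:
      type: jdbc                # REGISTRY_TYPE
      hikari-config:
        jdbc-url: jdbc:mysql://mysql:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&useSSL=false  # REGISTRY_HIKARI_CONFIG_JDBC_URL
        username: root          # REGISTRY_HIKARI_CONFIG_USERNAME
        password: "123456"      # REGISTRY_HIKARI_CONFIG_PASSWORD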

.github/workflows/cluster-test/mysql_with_mysql_registry/install_env.sh (58)

@@ -0,0 +1,58 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ---------------------------------------------------------
# INSTALL MACHINE
# ---------------------------------------------------------
# A comma separated list of machine hostname or IP would be installed DolphinScheduler,
# including master, worker, api, alert. If you want to deploy in pseudo-distributed
# mode, just write a pseudo-distributed hostname
# Example for hostnames: ips="ds1,ds2,ds3,ds4,ds5", Example for IPs: ips="192.168.8.1,192.168.8.2,192.168.8.3,192.168.8.4,192.168.8.5"
ips=${ips:-"localhost"}
# Port of SSH protocol, default value is 22. For now we only support same port in all `ips` machine
# modify it if you use different ssh port
sshPort=${sshPort:-"22"}
# A comma separated list of machine hostname or IP would be installed Master server, it
# must be a subset of configuration `ips`.
# Example for hostnames: masters="ds1,ds2", Example for IPs: masters="192.168.8.1,192.168.8.2"
masters=${masters:-"localhost"}
# A comma separated list of machine <hostname>:<workerGroup> or <IP>:<workerGroup>.All hostname or IP must be a
# subset of configuration `ips`, And workerGroup have default value as `default`, but we recommend you declare behind the hosts
# Example for hostnames: workers="ds1:default,ds2:default,ds3:default", Example for IPs: workers="192.168.8.1:default,192.168.8.2:default,192.168.8.3:default"
workers=${workers:-"localhost:default"}
# A comma separated list of machine hostname or IP would be installed Alert server, it
# must be a subset of configuration `ips`.
# Example for hostname: alertServer="ds3", Example for IP: alertServer="192.168.8.3"
alertServer=${alertServer:-"localhost"}
# A comma separated list of machine hostname or IP would be installed API server, it
# must be a subset of configuration `ips`.
# Example for hostname: apiServers="ds1", Example for IP: apiServers="192.168.8.1"
apiServers=${apiServers:-"localhost"}
# The directory to install DolphinScheduler for all machine we config above. It will automatically be created by `install.sh` script if not exists.
# Do not set this configuration same as the current path (pwd)
installPath=${installPath:-"/root/apache-dolphinscheduler-*-SNAPSHOT-bin"}
# The user to deploy DolphinScheduler for all machine we config above. For now user must create by yourself before running `install.sh`
# script. The user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled than the root directory needs
# to be created by this user
deployUser=${deployUser:-"dolphinscheduler"}

.github/workflows/cluster-test/mysql/running_test.sh → .github/workflows/cluster-test/mysql_with_mysql_registry/running_test.sh (0)

.github/workflows/cluster-test/mysql/start-job.sh → .github/workflows/cluster-test/mysql_with_mysql_registry/start-job.sh (8)

@@ -18,16 +18,16 @@
set -euox pipefail
#Start base service containers
docker-compose -f .github/workflows/cluster-test/mysql/docker-compose-base.yaml up -d
docker-compose -f .github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-base.yaml up -d
#Build ds mysql cluster image
docker build -t jdk8:ds_mysql_cluster -f .github/workflows/cluster-test/mysql/Dockerfile .
docker build -t jdk8:ds_mysql_cluster -f .github/workflows/cluster-test/mysql_with_mysql_registry/Dockerfile .
#Start ds mysql cluster container
docker-compose -f .github/workflows/cluster-test/mysql/docker-compose-cluster.yaml up -d
docker-compose -f .github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-cluster.yaml up -d
#Running tests
/bin/bash .github/workflows/cluster-test/mysql/running_test.sh
/bin/bash .github/workflows/cluster-test/mysql_with_mysql_registry/running_test.sh
#Cleanup
docker rm -f $(docker ps -aq)

.github/workflows/cluster-test/mysql_with_zookeeper_registry/Dockerfile (48)

@@ -0,0 +1,48 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM eclipse-temurin:8-jre
RUN apt update ; \
apt install -y wget default-mysql-client sudo openssh-server netcat-traditional ;
COPY ./apache-dolphinscheduler-*-SNAPSHOT-bin.tar.gz /root
RUN tar -zxvf /root/apache-dolphinscheduler-*-SNAPSHOT-bin.tar.gz -C ~
RUN mv /root/apache-dolphinscheduler-*-SNAPSHOT-bin /root/apache-dolphinscheduler-test-SNAPSHOT-bin
ENV DOLPHINSCHEDULER_HOME /root/apache-dolphinscheduler-test-SNAPSHOT-bin
#Setting install.sh
COPY .github/workflows/cluster-test/mysql_with_zookeeper_registry/install_env.sh $DOLPHINSCHEDULER_HOME/bin/env/install_env.sh
#Setting dolphinscheduler_env.sh
COPY .github/workflows/cluster-test/mysql_with_zookeeper_registry/dolphinscheduler_env.sh $DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh
#Download mysql jar
ENV MYSQL_URL "https://repo.maven.apache.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar"
ENV MYSQL_DRIVER "mysql-connector-java-8.0.16.jar"
RUN wget -O $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $MYSQL_URL ; \
cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/api-server/libs/$MYSQL_DRIVER ; \
cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/master-server/libs/$MYSQL_DRIVER ; \
cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/worker-server/libs/$MYSQL_DRIVER ; \
cp $DOLPHINSCHEDULER_HOME/alert-server/libs/$MYSQL_DRIVER $DOLPHINSCHEDULER_HOME/tools/libs/$MYSQL_DRIVER
#Deploy
COPY .github/workflows/cluster-test/mysql_with_zookeeper_registry/deploy.sh /root/deploy.sh
CMD [ "/bin/bash", "/root/deploy.sh" ]

.github/workflows/cluster-test/mysql/deploy.sh → .github/workflows/cluster-test/mysql_with_zookeeper_registry/deploy.sh (0)

.github/workflows/cluster-test/mysql/docker-compose-base.yaml → .github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-base.yaml (0)

.github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-cluster.yaml (29)

@@ -0,0 +1,29 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version: "3"
services:
ds:
container_name: ds
image: jdk8:ds_mysql_cluster
restart: always
ports:
- "12345:12345"
- "5679:5679"
- "1235:1235"
- "50053:50053"

.github/workflows/cluster-test/mysql/dolphinscheduler_env.sh → .github/workflows/cluster-test/mysql_with_zookeeper_registry/dolphinscheduler_env.sh (0)

.github/workflows/cluster-test/mysql/install_env.sh → .github/workflows/cluster-test/mysql_with_zookeeper_registry/install_env.sh (0)

.github/workflows/cluster-test/mysql_with_zookeeper_registry/running_test.sh (108)

@@ -0,0 +1,108 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -x
API_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:12345/dolphinscheduler/actuator/health"
MASTER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:5679/actuator/health"
WORKER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:1235/actuator/health"
ALERT_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:50053/actuator/health"
#Cluster start health check
TIMEOUT=180
START_HEALTHCHECK_EXITCODE=0
for ((i=1; i<=TIMEOUT; i++))
do
MASTER_HTTP_STATUS=$(eval "$MASTER_HEALTHCHECK_COMMAND")
WORKER_HTTP_STATUS=$(eval "$WORKER_HEALTHCHECK_COMMAND")
API_HTTP_STATUS=$(eval "$API_HEALTHCHECK_COMMAND")
ALERT_HTTP_STATUS=$(eval "$ALERT_HEALTHCHECK_COMMAND")
if [[ $MASTER_HTTP_STATUS -eq 200 && $WORKER_HTTP_STATUS -eq 200 && $API_HTTP_STATUS -eq 200 && $ALERT_HTTP_STATUS -eq 200 ]];then
START_HEALTHCHECK_EXITCODE=0
else
START_HEALTHCHECK_EXITCODE=2
fi
if [[ $START_HEALTHCHECK_EXITCODE -eq 0 ]];then
echo "cluster start health check success"
break
fi
if [[ $i -eq $TIMEOUT ]];then
if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/dolphinscheduler-master.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/*.out"
echo "master start health check failed"
fi
if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/dolphinscheduler-worker.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/*.out"
echo "worker start health check failed"
fi
if [[ $API_HTTP_STATUS -ne 200 ]]; then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/dolphinscheduler-api.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/*.out"
echo "api start health check failed"
fi
if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/dolphinscheduler-alert.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/*.out"
echo "alert start health check failed"
fi
exit $START_HEALTHCHECK_EXITCODE
fi
sleep 1
done
#Stop Cluster
docker exec -u root ds bash -c "/root/apache-dolphinscheduler-*-SNAPSHOT-bin/bin/stop-all.sh"
#Cluster stop health check
sleep 5
MASTER_HTTP_STATUS=$(eval "$MASTER_HEALTHCHECK_COMMAND")
if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
echo "master stop health check success"
else
echo "master stop health check failed"
exit 3
fi
WORKER_HTTP_STATUS=$(eval "$WORKER_HEALTHCHECK_COMMAND")
if [[ $WORKER_HTTP_STATUS -ne 200 ]];then
echo "worker stop health check success"
else
echo "worker stop health check failed"
exit 3
fi
API_HTTP_STATUS=$(eval "$API_HEALTHCHECK_COMMAND")
if [[ $API_HTTP_STATUS -ne 200 ]];then
echo "api stop health check success"
else
echo "api stop health check failed"
exit 3
fi
ALERT_HTTP_STATUS=$(eval "$ALERT_HEALTHCHECK_COMMAND")
if [[ $ALERT_HTTP_STATUS -ne 200 ]];then
echo "alert stop health check success"
else
echo "alert stop health check failed"
exit 3
fi

.github/workflows/cluster-test/mysql_with_zookeeper_registry/start-job.sh (33)

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -euox pipefail
#Start base service containers
docker-compose -f .github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-base.yaml up -d
#Build ds mysql cluster image
docker build -t jdk8:ds_mysql_cluster -f .github/workflows/cluster-test/mysql_with_zookeeper_registry/Dockerfile .
#Start ds mysql cluster container
docker-compose -f .github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-cluster.yaml up -d
#Running tests
/bin/bash .github/workflows/cluster-test/mysql_with_zookeeper_registry/running_test.sh
#Cleanup
docker rm -f $(docker ps -aq)

.github/workflows/cluster-test/postgresql_with_postgresql_registry/Dockerfile (39)

@@ -0,0 +1,39 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM eclipse-temurin:8-jre
RUN apt update ; \
apt install -y wget sudo openssh-server netcat-traditional ;
COPY ./apache-dolphinscheduler-*-SNAPSHOT-bin.tar.gz /root
RUN tar -zxvf /root/apache-dolphinscheduler-*-SNAPSHOT-bin.tar.gz -C ~
RUN mv /root/apache-dolphinscheduler-*-SNAPSHOT-bin /root/apache-dolphinscheduler-test-SNAPSHOT-bin
ENV DOLPHINSCHEDULER_HOME /root/apache-dolphinscheduler-test-SNAPSHOT-bin
#Setting install.sh
COPY .github/workflows/cluster-test/postgresql_with_postgresql_registry/install_env.sh $DOLPHINSCHEDULER_HOME/bin/env/install_env.sh
#Setting dolphinscheduler_env.sh
COPY .github/workflows/cluster-test/postgresql_with_postgresql_registry/dolphinscheduler_env.sh $DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh
#Deploy
COPY .github/workflows/cluster-test/postgresql_with_postgresql_registry/deploy.sh /root/deploy.sh
CMD [ "/bin/bash", "/root/deploy.sh" ]

.github/workflows/cluster-test/postgresql_with_postgresql_registry/deploy.sh (41)

@@ -0,0 +1,41 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -euox pipefail
USER=root
#Sudo
sed -i '$a'$USER' ALL=(ALL) NOPASSWD: NOPASSWD: ALL' /etc/sudoers
sed -i 's/Defaults requirett/#Defaults requirett/g' /etc/sudoers
#SSH
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
service ssh start
#Init schema
/bin/bash $DOLPHINSCHEDULER_HOME/tools/bin/upgrade-schema.sh
/bin/bash $DOLPHINSCHEDULER_HOME/tools/bin/initialize-jdbc-registry.sh
#Start Cluster
/bin/bash $DOLPHINSCHEDULER_HOME/bin/start-all.sh
#Keep running
tail -f /dev/null

.github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-base.yaml (35)

@@ -0,0 +1,35 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version: "3"
services:
postgres:
container_name: postgres
image: postgres:14.1
restart: always
environment:
POSTGRES_PASSWORD: postgres
POSTGRES_DB: dolphinscheduler
ports:
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 5s
timeout: 60s
retries: 120

.github/workflows/cluster-test/postgresql/docker-compose-cluster.yaml → .github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-cluster.yaml (0)

.github/workflows/cluster-test/postgresql_with_postgresql_registry/dolphinscheduler_env.sh (58)

@@ -0,0 +1,58 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# JAVA_HOME, will use it to start DolphinScheduler server
export JAVA_HOME=${JAVA_HOME:-/opt/java/openjdk}
# Database related configuration, set database type, username and password
export DATABASE=${DATABASE:-postgresql}
export SPRING_PROFILES_ACTIVE=${DATABASE}
export SPRING_DATASOURCE_URL="jdbc:postgresql://postgres:5432/dolphinscheduler"
export SPRING_DATASOURCE_USERNAME=postgres
export SPRING_DATASOURCE_PASSWORD=postgres
# DolphinScheduler server related configuration
export SPRING_CACHE_TYPE=${SPRING_CACHE_TYPE:-none}
export SPRING_JACKSON_TIME_ZONE=${SPRING_JACKSON_TIME_ZONE:-UTC}
export MASTER_FETCH_COMMAND_NUM=${MASTER_FETCH_COMMAND_NUM:-10}
# Registry center configuration, determines the type and link of the registry center
export REGISTRY_TYPE=jdbc
export REGISTRY_HIKARI_CONFIG_JDBC_URL="jdbc:postgresql://postgres:5432/dolphinscheduler"
export REGISTRY_HIKARI_CONFIG_USERNAME=postgres
export REGISTRY_HIKARI_CONFIG_PASSWORD=postgres
# Tasks related configurations, need to change the configuration if you use the related tasks.
export HADOOP_HOME=${HADOOP_HOME:-/opt/soft/hadoop}
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/opt/soft/hadoop/etc/hadoop}
export SPARK_HOME=${SPARK_HOME:-/opt/soft/spark}
export PYTHON_LAUNCHER=${PYTHON_LAUNCHER:-/opt/soft/python/bin/python3}
export HIVE_HOME=${HIVE_HOME:-/opt/soft/hive}
export FLINK_HOME=${FLINK_HOME:-/opt/soft/flink}
export DATAX_LAUNCHER=${DATAX_LAUNCHER:-/opt/soft/datax/bin/datax.py}
export PATH=$HADOOP_HOME/bin:$SPARK_HOME/bin:$PYTHON_LAUNCHER:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$DATAX_LAUNCHER:$PATH
export MASTER_RESERVED_MEMORY=0.01
export WORKER_RESERVED_MEMORY=0.01
# applicationId auto collection related configuration, the following configurations are unnecessary if setting appId.collect=log
#export HADOOP_CLASSPATH=`hadoop classpath`:${DOLPHINSCHEDULER_HOME}/tools/libs/*
#export SPARK_DIST_CLASSPATH=$HADOOP_CLASSPATH:$SPARK_DIST_CLASS_PATH
#export HADOOP_CLIENT_OPTS="-javaagent:${DOLPHINSCHEDULER_HOME}/tools/libs/aspectjweaver-1.9.7.jar":$HADOOP_CLIENT_OPTS
#export SPARK_SUBMIT_OPTS="-javaagent:${DOLPHINSCHEDULER_HOME}/tools/libs/aspectjweaver-1.9.7.jar":$SPARK_SUBMIT_OPTS
#export FLINK_ENV_JAVA_OPTS="-javaagent:${DOLPHINSCHEDULER_HOME}/tools/libs/aspectjweaver-1.9.7.jar":$FLINK_ENV_JAVA_OPTS

.github/workflows/cluster-test/postgresql_with_postgresql_registry/install_env.sh (58)

@@ -0,0 +1,58 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ---------------------------------------------------------
# INSTALL MACHINE
# ---------------------------------------------------------
# A comma separated list of machine hostname or IP would be installed DolphinScheduler,
# including master, worker, api, alert. If you want to deploy in pseudo-distributed
# mode, just write a pseudo-distributed hostname
# Example for hostnames: ips="ds1,ds2,ds3,ds4,ds5", Example for IPs: ips="192.168.8.1,192.168.8.2,192.168.8.3,192.168.8.4,192.168.8.5"
ips=${ips:-"localhost"}
# Port of SSH protocol, default value is 22. For now we only support same port in all `ips` machine
# modify it if you use different ssh port
sshPort=${sshPort:-"22"}
# A comma separated list of machine hostname or IP would be installed Master server, it
# must be a subset of configuration `ips`.
# Example for hostnames: masters="ds1,ds2", Example for IPs: masters="192.168.8.1,192.168.8.2"
masters=${masters:-"localhost"}
# A comma separated list of machine <hostname>:<workerGroup> or <IP>:<workerGroup>.All hostname or IP must be a
# subset of configuration `ips`, And workerGroup have default value as `default`, but we recommend you declare behind the hosts
# Example for hostnames: workers="ds1:default,ds2:default,ds3:default", Example for IPs: workers="192.168.8.1:default,192.168.8.2:default,192.168.8.3:default"
workers=${workers:-"localhost:default"}
# A comma separated list of machine hostname or IP would be installed Alert server, it
# must be a subset of configuration `ips`.
# Example for hostname: alertServer="ds3", Example for IP: alertServer="192.168.8.3"
alertServer=${alertServer:-"localhost"}
# A comma separated list of machine hostname or IP would be installed API server, it
# must be a subset of configuration `ips`.
# Example for hostname: apiServers="ds1", Example for IP: apiServers="192.168.8.1"
apiServers=${apiServers:-"localhost"}
# The directory to install DolphinScheduler for all machine we config above. It will automatically be created by `install.sh` script if not exists.
# Do not set this configuration same as the current path (pwd)
installPath=${installPath:-"/root/apache-dolphinscheduler-*-SNAPSHOT-bin"}
# The user to deploy DolphinScheduler for all machine we config above. For now user must create by yourself before running `install.sh`
# script. The user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled than the root directory needs
# to be created by this user
deployUser=${deployUser:-"dolphinscheduler"}

.github/workflows/cluster-test/postgresql/running_test.sh → .github/workflows/cluster-test/postgresql_with_postgresql_registry/running_test.sh (0)

.github/workflows/cluster-test/postgresql_with_postgresql_registry/start-job.sh (33)

@@ -0,0 +1,33 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -euox pipefail
#Start base service containers
docker-compose -f .github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-base.yaml up -d
#Build ds postgresql cluster image
docker build -t jdk8:ds_postgresql_cluster -f .github/workflows/cluster-test/postgresql_with_postgresql_registry/Dockerfile .
#Start ds postgresql cluster container
docker-compose -f .github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-cluster.yaml up -d
#Running tests
/bin/bash .github/workflows/cluster-test/postgresql_with_postgresql_registry/running_test.sh
#Cleanup
docker rm -f $(docker ps -aq)

.github/workflows/cluster-test/postgresql/Dockerfile → .github/workflows/cluster-test/postgresql_with_zookeeper_registry/Dockerfile (6)

@@ -28,12 +28,12 @@ RUN mv /root/apache-dolphinscheduler-*-SNAPSHOT-bin /root/apache-dolphinschedule
ENV DOLPHINSCHEDULER_HOME /root/apache-dolphinscheduler-test-SNAPSHOT-bin
#Setting install.sh
COPY .github/workflows/cluster-test/postgresql/install_env.sh $DOLPHINSCHEDULER_HOME/bin/env/install_env.sh
COPY .github/workflows/cluster-test/postgresql_with_zookeeper_registry/install_env.sh $DOLPHINSCHEDULER_HOME/bin/env/install_env.sh
#Setting dolphinscheduler_env.sh
COPY .github/workflows/cluster-test/postgresql/dolphinscheduler_env.sh $DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh
COPY .github/workflows/cluster-test/postgresql_with_zookeeper_registry/dolphinscheduler_env.sh $DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh
#Deploy
COPY .github/workflows/cluster-test/postgresql/deploy.sh /root/deploy.sh
COPY .github/workflows/cluster-test/postgresql_with_zookeeper_registry/deploy.sh /root/deploy.sh
CMD [ "/bin/bash", "/root/deploy.sh" ]

.github/workflows/cluster-test/postgresql/deploy.sh → .github/workflows/cluster-test/postgresql_with_zookeeper_registry/deploy.sh (0)

.github/workflows/cluster-test/postgresql/docker-compose-base.yaml → .github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-base.yaml (0)

.github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-cluster.yaml (29)

@@ -0,0 +1,29 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version: "3"
services:
ds:
container_name: ds
image: jdk8:ds_postgresql_cluster
restart: always
ports:
- "12345:12345"
- "5679:5679"
- "1235:1235"
- "50053:50053"

.github/workflows/cluster-test/postgresql/dolphinscheduler_env.sh → .github/workflows/cluster-test/postgresql_with_zookeeper_registry/dolphinscheduler_env.sh (0)

.github/workflows/cluster-test/postgresql/install_env.sh → .github/workflows/cluster-test/postgresql_with_zookeeper_registry/install_env.sh (0)

.github/workflows/cluster-test/postgresql_with_zookeeper_registry/running_test.sh (109)

@@ -0,0 +1,109 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -x
API_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:12345/dolphinscheduler/actuator/health"
MASTER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:5679/actuator/health"
WORKER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:1235/actuator/health"
ALERT_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:50053/actuator/health"
#Cluster start health check
TIMEOUT=180
START_HEALTHCHECK_EXITCODE=0
for ((i=1; i<=TIMEOUT; i++))
do
MASTER_HTTP_STATUS=$(eval "$MASTER_HEALTHCHECK_COMMAND")
WORKER_HTTP_STATUS=$(eval "$WORKER_HEALTHCHECK_COMMAND")
API_HTTP_STATUS=$(eval "$API_HEALTHCHECK_COMMAND")
ALERT_HTTP_STATUS=$(eval "$ALERT_HEALTHCHECK_COMMAND")
if [[ $MASTER_HTTP_STATUS -eq 200 && $WORKER_HTTP_STATUS -eq 200 && $API_HTTP_STATUS -eq 200 && $ALERT_HTTP_STATUS -eq 200 ]];then
START_HEALTHCHECK_EXITCODE=0
else
START_HEALTHCHECK_EXITCODE=2
fi
if [[ $START_HEALTHCHECK_EXITCODE -eq 0 ]];then
echo "cluster start health check success"
break
fi
if [[ $i -eq $TIMEOUT ]];then
if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/dolphinscheduler-master.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/*.out"
echo "master start health check failed"
fi
if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/dolphinscheduler-worker.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/*.out"
echo "worker start health check failed"
fi
if [[ $API_HTTP_STATUS -ne 200 ]]; then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/dolphinscheduler-api.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/*.out"
echo "api start health check failed"
fi
if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/dolphinscheduler-alert.log"
docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/*.out"
echo "alert start health check failed"
fi
exit $START_HEALTHCHECK_EXITCODE
fi
sleep 1
done
#Stop Cluster
docker exec -u root ds bash -c "/root/apache-dolphinscheduler-*-SNAPSHOT-bin/bin/stop-all.sh"
#Cluster stop health check
sleep 5
MASTER_HTTP_STATUS=$(eval "$MASTER_HEALTHCHECK_COMMAND")
if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
echo "master stop health check success"
else
echo "master stop health check failed"
exit 3
fi
WORKER_HTTP_STATUS=$(eval "$WORKER_HEALTHCHECK_COMMAND")
if [[ $WORKER_HTTP_STATUS -ne 200 ]];then
echo "worker stop health check success"
else
echo "worker stop health check failed"
exit 3
fi
API_HTTP_STATUS=$(eval "$API_HEALTHCHECK_COMMAND")
if [[ $API_HTTP_STATUS -ne 200 ]];then
echo "api stop health check success"
else
echo "api stop health check failed"
exit 3
fi
ALERT_HTTP_STATUS=$(eval "$ALERT_HEALTHCHECK_COMMAND")
if [[ $ALERT_HTTP_STATUS -ne 200 ]];then
echo "alert stop health check success"
else
echo "alert stop health check failed"
exit 3
fi

.github/workflows/cluster-test/postgresql/start-job.sh → .github/workflows/cluster-test/postgresql_with_zookeeper_registry/start-job.sh (8)

@@ -18,16 +18,16 @@
set -euox pipefail
#Start base service containers
docker-compose -f .github/workflows/cluster-test/postgresql/docker-compose-base.yaml up -d
docker-compose -f .github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-base.yaml up -d
#Build ds postgresql cluster image
docker build -t jdk8:ds_postgresql_cluster -f .github/workflows/cluster-test/postgresql/Dockerfile .
docker build -t jdk8:ds_postgresql_cluster -f .github/workflows/cluster-test/postgresql_with_zookeeper_registry/Dockerfile .
#Start ds postgresql cluster container
docker-compose -f .github/workflows/cluster-test/postgresql/docker-compose-cluster.yaml up -d
docker-compose -f .github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-cluster.yaml up -d
#Running tests
/bin/bash .github/workflows/cluster-test/postgresql/running_test.sh
/bin/bash .github/workflows/cluster-test/postgresql_with_zookeeper_registry/running_test.sh
#Cleanup
docker rm -f $(docker ps -aq)

dolphinscheduler-alert/dolphinscheduler-alert-server/src/main/java/org/apache/dolphinscheduler/alert/AlertServer.java (19)

@@ -27,21 +27,24 @@ import org.apache.dolphinscheduler.common.constants.Constants;
import org.apache.dolphinscheduler.common.lifecycle.ServerLifeCycleManager;
import org.apache.dolphinscheduler.common.thread.DefaultUncaughtExceptionHandler;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.plugin.registry.jdbc.JdbcRegistryAutoConfiguration;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.event.EventListener;
import org.springframework.context.annotation.FilterType;
@SpringBootApplication
@ComponentScan("org.apache.dolphinscheduler")
@Slf4j
@SpringBootApplication
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.ASSIGNABLE_TYPE, value = JdbcRegistryAutoConfiguration.class)
})
public class AlertServer {
@Autowired
@@ -59,11 +62,11 @@ public class AlertServer {
AlertServerMetrics.registerUncachedException(DefaultUncaughtExceptionHandler::getUncaughtExceptionCount);
Thread.setDefaultUncaughtExceptionHandler(DefaultUncaughtExceptionHandler.getInstance());
Thread.currentThread().setName(Constants.THREAD_NAME_ALERT_SERVER);
new SpringApplicationBuilder(AlertServer.class).run(args);
SpringApplication.run(AlertServer.class, args);
}
@EventListener
public void run(ApplicationReadyEvent readyEvent) {
@PostConstruct
public void run() {
log.info("Alert server is staring ...");
alertPluginManager.start();
alertRegistryClient.start();

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/ApiApplicationServer.java (6)

@@ -22,6 +22,7 @@ import org.apache.dolphinscheduler.common.enums.PluginType;
import org.apache.dolphinscheduler.common.thread.DefaultUncaughtExceptionHandler;
import org.apache.dolphinscheduler.dao.PluginDao;
import org.apache.dolphinscheduler.dao.entity.PluginDefine;
import org.apache.dolphinscheduler.plugin.registry.jdbc.JdbcRegistryAutoConfiguration;
import org.apache.dolphinscheduler.plugin.task.api.TaskChannelFactory;
import org.apache.dolphinscheduler.plugin.task.api.TaskPluginManager;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
@@ -38,11 +39,14 @@ import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.context.event.ApplicationReadyEvent;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.context.event.EventListener;
@ServletComponentScan
@SpringBootApplication
@ComponentScan("org.apache.dolphinscheduler")
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.ASSIGNABLE_TYPE, value = JdbcRegistryAutoConfiguration.class)
})
@Slf4j
public class ApiApplicationServer {

dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/PluginDao.java (4)

@@ -29,10 +29,10 @@ import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Repository;
@Slf4j
@Component
@Repository
public class PluginDao {
@Autowired

dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/AlertDaoTest.java → dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/repository/impl/AlertDaoTest.java (4)

@@ -15,10 +15,12 @@
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao;
package org.apache.dolphinscheduler.dao.repository.impl;
import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.common.enums.ProfileType;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.DaoConfiguration;
import org.apache.dolphinscheduler.dao.entity.Alert;
import java.util.List;

dolphinscheduler-master/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java (6)

@@ -24,6 +24,7 @@ import org.apache.dolphinscheduler.common.thread.DefaultUncaughtExceptionHandler
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.meter.metrics.MetricsProvider;
import org.apache.dolphinscheduler.meter.metrics.SystemMetrics;
import org.apache.dolphinscheduler.plugin.registry.jdbc.JdbcRegistryAutoConfiguration;
import org.apache.dolphinscheduler.plugin.task.api.TaskPluginManager;
import org.apache.dolphinscheduler.scheduler.api.SchedulerApi;
import org.apache.dolphinscheduler.server.master.metrics.MasterServerMetrics;
@@ -46,10 +47,13 @@ import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.transaction.annotation.EnableTransactionManagement;
@SpringBootApplication
@ComponentScan("org.apache.dolphinscheduler")
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.ASSIGNABLE_TYPE, value = JdbcRegistryAutoConfiguration.class)
})
@EnableTransactionManagement
@EnableCaching
@Slf4j

dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/README.md (10)

@@ -1,6 +1,7 @@
# Introduction
This module is the jdbc registry plugin module, this plugin will use jdbc as the registry center. Will use the database configuration same as DolphinScheduler in api'yaml default.
This module is the jdbc registry plugin module, this plugin will use jdbc as the registry center. Will use the database
configuration same as DolphinScheduler in api'yaml default.
# How to use
@@ -22,8 +23,11 @@ registry:
After do this two steps, you can start your DolphinScheduler cluster, your cluster will use mysql as registry center to
store server metadata.
NOTE: You need to add `mysql-connector-java.jar` into DS classpath if you use mysql database, since this plugin will not bundle this driver in distribution.
You can get the detail about <a href="https://dolphinscheduler.apache.org/en-us/docs/3.1.2/guide/installation/pseudo-cluster">Initialize the Database</a>.
NOTE: You need to add `mysql-connector-java.jar` into DS classpath if you use mysql database, since this plugin will not
bundle this driver in distribution.
You can get the detail
about <a href="https://dolphinscheduler.apache.org/en-us/docs/3.1.2/guide/installation/pseudo-cluster">Initialize the
Database</a>.
## Optional configuration
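For reference, the required registry block that the two steps above configure presumably looks like the minimal sketch below; the key names are assumed from the REGISTRY_* variables used in this commit's cluster-test environment files, and the URL and credentials are placeholders.

    # Minimal sketch only; point jdbc-url and the credentials at your own MySQL instance
    registry:
      type: jdbc
      hikari-config:
        jdbc-url: jdbc:mysql://127.0.0.1:3306/dolphinscheduler
        username: root
        password: root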

dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/pom.xml (10)

@@ -62,6 +62,16 @@
<artifactId>mybatis-plus</artifactId>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-to-slf4j</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</project>

dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/src/main/java/org/apache/dolphinscheduler/plugin/registry/jdbc/JdbcRegistryConfiguration.java → dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/src/main/java/org/apache/dolphinscheduler/plugin/registry/jdbc/JdbcRegistryAutoConfiguration.java (30)

@@ -22,40 +22,56 @@ import org.apache.dolphinscheduler.plugin.registry.jdbc.mapper.JdbcRegistryLockM
import org.apache.ibatis.session.SqlSessionFactory;
import lombok.extern.slf4j.Slf4j;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.autoconfigure.AutoConfigureAfter;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.baomidou.mybatisplus.autoconfigure.MybatisPlusAutoConfiguration;
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
import com.zaxxer.hikari.HikariDataSource;
@Configuration
@Slf4j
@Configuration(proxyBeanMethods = false)
@MapperScan("org.apache.dolphinscheduler.plugin.registry.jdbc.mapper")
@ConditionalOnProperty(prefix = "registry", name = "type", havingValue = "jdbc")
public class JdbcRegistryConfiguration {
@AutoConfigureAfter(MybatisPlusAutoConfiguration.class)
public class JdbcRegistryAutoConfiguration {
public JdbcRegistryAutoConfiguration() {
log.info("Load JdbcRegistryAutoConfiguration");
}
@Bean
@ConditionalOnProperty(prefix = "registry.hikari-config", name = "jdbc-url")
public SqlSessionFactory jdbcRegistrySqlSessionFactory(JdbcRegistryProperties jdbcRegistryProperties) throws Exception {
@ConditionalOnMissingBean
public SqlSessionFactory sqlSessionFactory(JdbcRegistryProperties jdbcRegistryProperties) throws Exception {
log.info("Initialize jdbcRegistrySqlSessionFactory");
MybatisSqlSessionFactoryBean sqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();
sqlSessionFactoryBean.setDataSource(new HikariDataSource(jdbcRegistryProperties.getHikariConfig()));
return sqlSessionFactoryBean.getObject();
}
@Bean
public SqlSessionTemplate jdbcRegistrySqlSessionTemplate(SqlSessionFactory jdbcRegistrySqlSessionFactory) {
jdbcRegistrySqlSessionFactory.getConfiguration().addMapper(JdbcRegistryDataMapper.class);
jdbcRegistrySqlSessionFactory.getConfiguration().addMapper(JdbcRegistryLockMapper.class);
@ConditionalOnMissingBean
public SqlSessionTemplate sqlSessionTemplate(SqlSessionFactory jdbcRegistrySqlSessionFactory) {
log.info("Initialize jdbcRegistrySqlSessionTemplate");
return new SqlSessionTemplate(jdbcRegistrySqlSessionFactory);
}
@Bean
public JdbcRegistryDataMapper jdbcRegistryDataMapper(SqlSessionTemplate jdbcRegistrySqlSessionTemplate) {
jdbcRegistrySqlSessionTemplate.getConfiguration().addMapper(JdbcRegistryDataMapper.class);
return jdbcRegistrySqlSessionTemplate.getMapper(JdbcRegistryDataMapper.class);
}
@Bean
public JdbcRegistryLockMapper jdbcRegistryLockMapper(SqlSessionTemplate jdbcRegistrySqlSessionTemplate) {
jdbcRegistrySqlSessionTemplate.getConfiguration().addMapper(JdbcRegistryLockMapper.class);
return jdbcRegistrySqlSessionTemplate.getMapper(JdbcRegistryLockMapper.class);
}
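
Read as plain code, the `@Bean` methods above assemble the usual MyBatis wiring chain for the registry's own datasource: HikariDataSource → SqlSessionFactory → SqlSessionTemplate → mapper proxies. A condensed, framework-free sketch of the same chain, using only the classes that appear in this diff (placeholder connection values, illustration only):

```java
import org.apache.dolphinscheduler.plugin.registry.jdbc.mapper.JdbcRegistryDataMapper;
import org.apache.dolphinscheduler.plugin.registry.jdbc.mapper.JdbcRegistryLockMapper;

import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionTemplate;

import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;

public class JdbcRegistryWiringSketch {
    public static void main(String[] args) throws Exception {
        // 1. Pooled datasource for the registry tables (placeholder values).
        HikariConfig hikariConfig = new HikariConfig();
        hikariConfig.setJdbcUrl("jdbc:mysql://127.0.0.1:3306/dolphinscheduler");
        hikariConfig.setUsername("root");
        hikariConfig.setPassword("root");

        // 2. SqlSessionFactory built the same way as jdbcRegistrySqlSessionFactory above.
        MybatisSqlSessionFactoryBean factoryBean = new MybatisSqlSessionFactoryBean();
        factoryBean.setDataSource(new HikariDataSource(hikariConfig));
        SqlSessionFactory sqlSessionFactory = factoryBean.getObject();

        // 3. SqlSessionTemplate, then register the two registry mappers and obtain proxies.
        SqlSessionTemplate template = new SqlSessionTemplate(sqlSessionFactory);
        template.getConfiguration().addMapper(JdbcRegistryDataMapper.class);
        template.getConfiguration().addMapper(JdbcRegistryLockMapper.class);

        JdbcRegistryDataMapper dataMapper = template.getMapper(JdbcRegistryDataMapper.class);
        JdbcRegistryLockMapper lockMapper = template.getMapper(JdbcRegistryLockMapper.class);
        System.out.println(dataMapper + " / " + lockMapper);
    }
}
```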

19
dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/src/main/resources/META-INF/spring.factories

@@ -0,0 +1,19 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
org.apache.dolphinscheduler.plugin.registry.jdbc.JdbcRegistryAutoConfiguration

1
dolphinscheduler-registry/dolphinscheduler-registry-plugins/dolphinscheduler-registry-jdbc/src/main/resources/mysql_registry_init.sql

@@ -15,7 +15,6 @@
* limitations under the License.
*/
SET FOREIGN_KEY_CHECKS = 0;
DROP TABLE IF EXISTS `t_ds_jdbc_registry_data`;
CREATE TABLE `t_ds_jdbc_registry_data`

31
dolphinscheduler-tools/src/main/bin/initialize-jdbc-registry.sh

@@ -0,0 +1,31 @@
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BIN_DIR=$(dirname $0)
DOLPHINSCHEDULER_HOME=${DOLPHINSCHEDULER_HOME:-$(cd $BIN_DIR/../..; pwd)}
if [ "$DOCKER" != "true" ]; then
source "$DOLPHINSCHEDULER_HOME/bin/env/dolphinscheduler_env.sh"
fi
JAVA_OPTS=${JAVA_OPTS:-"-server -Duser.timezone=${SPRING_JACKSON_TIME_ZONE} -Xms1g -Xmx1g -Xmn512m -XX:+PrintGCDetails -Xloggc:gc.log -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=dump.hprof"}
$JAVA_HOME/bin/java $JAVA_OPTS \
-cp "$DOLPHINSCHEDULER_HOME/tools/conf":"$DOLPHINSCHEDULER_HOME/tools/libs/*":"$DOLPHINSCHEDULER_HOME/tools/sql" \
-Dspring.profiles.active=${DATABASE} \
org.apache.dolphinscheduler.tools.command.CommandApplication

152
dolphinscheduler-tools/src/main/java/org/apache/dolphinscheduler/tools/command/CommandApplication.java

@@ -0,0 +1,152 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.tools.command;
import org.apache.dolphinscheduler.dao.DaoConfiguration;
import org.apache.dolphinscheduler.dao.plugin.api.dialect.DatabaseDialect;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import javax.sql.DataSource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.ImportAutoConfiguration;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.stereotype.Component;
import com.baomidou.mybatisplus.annotation.DbType;
// todo: use spring-shell to manage the command
@SpringBootApplication
@ImportAutoConfiguration(DaoConfiguration.class)
public class CommandApplication {
public static void main(String[] args) {
SpringApplication.run(CommandApplication.class, args);
}
@Component
@Slf4j
static class JdbcRegistrySchemaInitializeCommand implements CommandLineRunner {
@Autowired
private DatabaseDialect databaseDialect;
@Autowired
private DbType dbType;
@Autowired
private DataSource dataSource;
JdbcRegistrySchemaInitializeCommand() {
}
@Override
public void run(String... args) throws Exception {
if (databaseDialect.tableExists("t_ds_jdbc_registry_data")
|| databaseDialect.tableExists("t_ds_jdbc_registry_lock")) {
log.warn("t_ds_jdbc_registry_data/t_ds_jdbc_registry_lock already exists");
return;
}
if (dbType == DbType.MYSQL) {
jdbcRegistrySchemaInitializeInMysql();
} else if (dbType == DbType.POSTGRE_SQL) {
jdbcRegistrySchemaInitializeInPG();
} else {
log.error("Unsupported database type: {}", dbType);
}
}
private void jdbcRegistrySchemaInitializeInMysql() throws SQLException {
try (
Connection connection = dataSource.getConnection();
Statement statement = connection.createStatement()) {
statement.execute("CREATE TABLE `t_ds_jdbc_registry_data`\n" +
"(\n" +
" `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',\n" +
" `data_key` varchar(256) NOT NULL COMMENT 'key, like zookeeper node path',\n" +
" `data_value` text NOT NULL COMMENT 'data, like zookeeper node value',\n" +
" `data_type` tinyint(4) NOT NULL COMMENT '1: ephemeral node, 2: persistent node',\n"
+
" `last_term` bigint NOT NULL COMMENT 'last term time',\n" +
" `last_update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'last update time',\n"
+
" `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',\n"
+
" PRIMARY KEY (`id`),\n" +
" unique (`data_key`)\n" +
") ENGINE = InnoDB\n" +
" DEFAULT CHARSET = utf8;");
statement.execute("CREATE TABLE `t_ds_jdbc_registry_lock`\n" +
"(\n" +
" `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',\n" +
" `lock_key` varchar(256) NOT NULL COMMENT 'lock path',\n" +
" `lock_owner` varchar(256) NOT NULL COMMENT 'the lock owner, ip_processId',\n" +
" `last_term` bigint NOT NULL COMMENT 'last term time',\n" +
" `last_update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'last update time',\n"
+
" `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',\n"
+
" PRIMARY KEY (`id`),\n" +
" unique (`lock_key`)\n" +
") ENGINE = InnoDB\n" +
" DEFAULT CHARSET = utf8;");
}
}
private void jdbcRegistrySchemaInitializeInPG() throws SQLException {
try (
Connection connection = dataSource.getConnection();
Statement statement = connection.createStatement()) {
statement.execute("create table t_ds_jdbc_registry_data\n" +
"(\n" +
" id serial\n" +
" constraint t_ds_jdbc_registry_data_pk primary key,\n" +
" data_key varchar not null,\n" +
" data_value text not null,\n" +
" data_type int4 not null,\n" +
" last_term bigint not null,\n" +
" last_update_time timestamp default current_timestamp not null,\n" +
" create_time timestamp default current_timestamp not null\n" +
");");
statement.execute(
"create unique index t_ds_jdbc_registry_data_key_uindex on t_ds_jdbc_registry_data (data_key);");
statement.execute("create table t_ds_jdbc_registry_lock\n" +
"(\n" +
" id serial\n" +
" constraint t_ds_jdbc_registry_lock_pk primary key,\n" +
" lock_key varchar not null,\n" +
" lock_owner varchar not null,\n" +
" last_term bigint not null,\n" +
" last_update_time timestamp default current_timestamp not null,\n" +
" create_time timestamp default current_timestamp not null\n" +
");");
statement.execute(
"create unique index t_ds_jdbc_registry_lock_key_uindex on t_ds_jdbc_registry_lock (lock_key);");
}
}
}
}
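
`JdbcRegistrySchemaInitializeCommand` only creates the tables when neither already exists, so a quick way to confirm that `tools/bin/initialize-jdbc-registry.sh` did its work is to inspect the schema afterwards. A small verification sketch (not part of the PR; the JDBC URL and credentials are placeholders, and the MySQL driver must be on the classpath):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class VerifyJdbcRegistrySchema {
    public static void main(String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection(
                "jdbc:mysql://127.0.0.1:3306/dolphinscheduler", "root", "root")) {
            // Check for the two tables the initializer creates.
            for (String table : new String[]{"t_ds_jdbc_registry_data", "t_ds_jdbc_registry_lock"}) {
                try (ResultSet rs = connection.getMetaData().getTables(null, null, table, null)) {
                    System.out.println(table + (rs.next() ? " exists" : " is missing"));
                }
            }
        }
    }
}
```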

2
dolphinscheduler-tools/src/main/resources/application.yaml

@@ -63,6 +63,8 @@ spring:
datasource:
driver-class-name: com.mysql.cj.jdbc.Driver
url: jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
username: root
password: root
---
spring:

6
dolphinscheduler-worker/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java

@@ -24,6 +24,7 @@ import org.apache.dolphinscheduler.common.thread.DefaultUncaughtExceptionHandler
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.meter.metrics.MetricsProvider;
import org.apache.dolphinscheduler.meter.metrics.SystemMetrics;
import org.apache.dolphinscheduler.plugin.registry.jdbc.JdbcRegistryAutoConfiguration;
import org.apache.dolphinscheduler.plugin.task.api.TaskExecutionContext;
import org.apache.dolphinscheduler.plugin.task.api.TaskPluginManager;
import org.apache.dolphinscheduler.plugin.task.api.utils.LogUtils;
@@ -47,11 +48,14 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.transaction.annotation.EnableTransactionManagement;
@SpringBootApplication
@EnableTransactionManagement
@ComponentScan(basePackages = "org.apache.dolphinscheduler")
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.ASSIGNABLE_TYPE, value = JdbcRegistryAutoConfiguration.class)
})
@Slf4j
public class WorkerServer implements IStoppable {
