
[Chore] Improve ci check (#16294)

* improve ci check

xiangzihao committed via GitHub (branch: dev)
commit bc256a18e6
17 changed files:

  1. .github/workflows/api-test.yml (4 changed lines)
  2. .github/workflows/backend.yml (6 changed lines)
  3. .github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-base.yaml (2 changed lines)
  4. .github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-cluster.yaml (5 changed lines)
  5. .github/workflows/cluster-test/mysql_with_mysql_registry/running_test.sh (18 changed lines)
  6. .github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-base.yaml (8 changed lines)
  7. .github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-cluster.yaml (5 changed lines)
  8. .github/workflows/cluster-test/mysql_with_zookeeper_registry/running_test.sh (18 changed lines)
  9. .github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-cluster.yaml (5 changed lines)
  10. .github/workflows/cluster-test/postgresql_with_postgresql_registry/running_test.sh (45 changed lines)
  11. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-base.yaml (6 changed lines)
  12. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-cluster.yaml (5 changed lines)
  13. .github/workflows/cluster-test/postgresql_with_zookeeper_registry/running_test.sh (45 changed lines)
  14. .github/workflows/e2e.yml (4 changed lines)
  15. .github/workflows/publish-docker.yaml (2 changed lines)
  16. .github/workflows/unit-test.yml (25 changed lines)
  17. pom.xml (3 changed lines)

.github/workflows/api-test.yml (4 changed lines)

@@ -59,7 +59,7 @@ jobs:
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
       - name: Cache local Maven repository
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-api-test
@@ -110,7 +110,7 @@ jobs:
         with:
           submodules: true
       - name: Cache local Maven repository
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-api-test
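
The same actions/cache v3→v4 bump recurs in backend.yml, e2e.yml, publish-docker.yaml, and unit-test.yml below; v4 mainly moves the action to the Node 20 runtime, and the inputs used here are unchanged. For reference, a hedged sketch of the full step shape these workflows use (the restore-keys line is only visible in unit-test.yml and may be absent elsewhere):

    # Cache the local Maven repository, keyed on the OS and the pom.xml hashes.
    - name: Cache local Maven repository
      uses: actions/cache@v4
      with:
        path: ~/.m2/repository
        key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-api-test
        restore-keys: ${{ runner.os }}-maven-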

.github/workflows/backend.yml (6 changed lines)

@@ -75,7 +75,7 @@ jobs:
         uses: ./.github/actions/sanity-check
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-backend
@@ -118,6 +118,10 @@ jobs:
       - uses: actions/checkout@v4
         with:
           submodules: true
+      - name: Collect Workflow Telemetry
+        uses: ./.github/actions/workflow-telemetry-action
+        with:
+          comment_on_pr: false
       - uses: actions/download-artifact@v4
         name: Download Binary Package
         with:

.github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-base.yaml (2 changed lines)

@@ -20,7 +20,7 @@ version: "3"
 services:
   mysql:
     container_name: mysql
-    image: mysql:5.7.36
+    image: mysql:8.0.33
     command: --default-authentication-plugin=mysql_native_password
     restart: always
     environment:
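
The MySQL image moves from 5.7.36 to 8.0.33 while keeping --default-authentication-plugin=mysql_native_password. That flag matters more on 8.0, whose default is caching_sha2_password: keeping mysql_native_password preserves compatibility with older clients and JDBC drivers. A minimal sketch of the resulting service (environment and healthcheck elided, as in the capture):

    services:
      mysql:
        container_name: mysql
        image: mysql:8.0.33   # was mysql:5.7.36
        # Keep the 5.7-era auth plugin so existing drivers can still connect.
        command: --default-authentication-plugin=mysql_native_password
        restart: always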

.github/workflows/cluster-test/mysql_with_mysql_registry/docker-compose-cluster.yaml (5 changed lines)

@@ -22,6 +22,11 @@ services:
     container_name: ds
     image: jdk8:ds_mysql_cluster
     restart: always
+    volumes:
+      - ${HOME}/ds_test/master-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/master-server/logs
+      - ${HOME}/ds_test/worker-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/worker-server/logs
+      - ${HOME}/ds_test/alert-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/alert-server/logs
+      - ${HOME}/ds_test/api-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/api-server/logs
     ports:
       - "12345:12345"
       - "5679:5679"

.github/workflows/cluster-test/mysql_with_mysql_registry/running_test.sh (18 changed lines)

@@ -24,7 +24,7 @@ WORKER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http:/
 ALERT_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:50053/actuator/health"
 #Cluster start health check
-TIMEOUT=180
+TIMEOUT=300
 START_HEALTHCHECK_EXITCODE=0
 for ((i=1; i<=TIMEOUT; i++))
@@ -46,23 +46,23 @@ do
   if [[ $i -eq $TIMEOUT ]];then
     if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/dolphinscheduler-master.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/*.out"
+      cat ~/ds_test/master-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/master-server/logs/*.out
       echo "master start health check failed"
     fi
     if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/dolphinscheduler-worker.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/*.out"
+      cat ~/ds_test/worker-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/worker-server/logs/*.out
       echo "worker start health check failed"
     fi
     if [[ $API_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/dolphinscheduler-api.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/*.out"
+      cat ~/ds_test/api-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/api-server/logs/*.out
       echo "api start health check failed"
     fi
     if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/dolphinscheduler-alert.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/*.out"
+      cat ~/ds_test/alert-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/alert-server/logs/*.out
       echo "alert start health check failed"
     fi
     exit $START_HEALTHCHECK_EXITCODE
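
The script polls each server's Spring Boot actuator health endpoint once per second and, only on the final iteration (now 300 s instead of 180 s), dumps logs and exits. Note that on the new side the worker, api, and alert branches all cat dolphinscheduler-master.log, even though each server writes its own log file (dolphinscheduler-worker.log, dolphinscheduler-api.log, dolphinscheduler-alert.log); that looks like a copy-paste slip. A de-duplicated sketch of the presumably intended timeout branch, reusing the script's own variables:

    # Hedged sketch, not the committed code: assumes each service writes
    # dolphinscheduler-<service>.log under ~/ds_test/<service>-server/logs.
    declare -A STATUS=([master]=$MASTER_HTTP_STATUS [worker]=$WORKER_HTTP_STATUS
                       [api]=$API_HTTP_STATUS [alert]=$ALERT_HTTP_STATUS)
    for svc in master worker api alert; do
      if [[ ${STATUS[$svc]} -ne 200 ]]; then
        cat ~/ds_test/${svc}-server/logs/dolphinscheduler-${svc}.log
        cat ~/ds_test/${svc}-server/logs/*.out
        echo "${svc} start health check failed"
      fi
    done
    exit $START_HEALTHCHECK_EXITCODE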

.github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-base.yaml (8 changed lines)

@@ -20,7 +20,7 @@ version: "3"
 services:
   mysql:
     container_name: mysql
-    image: mysql:5.7.36
+    image: mysql:8.0.33
     command: --default-authentication-plugin=mysql_native_password
     restart: always
     environment:
@@ -34,7 +34,7 @@ services:
       retries: 120
   zoo1:
-    image: zookeeper:3.8.0
+    image: zookeeper:3.8
     restart: always
     hostname: zoo1
     ports:
@@ -44,7 +44,7 @@ services:
       ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
   zoo2:
-    image: zookeeper:3.8.0
+    image: zookeeper:3.8
     restart: always
     hostname: zoo2
     ports:
@@ -54,7 +54,7 @@ services:
       ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
   zoo3:
-    image: zookeeper:3.8.0
+    image: zookeeper:3.8
     restart: always
     hostname: zoo3
     ports:

.github/workflows/cluster-test/mysql_with_zookeeper_registry/docker-compose-cluster.yaml (5 changed lines)

@@ -22,6 +22,11 @@ services:
     container_name: ds
     image: jdk8:ds_mysql_cluster
     restart: always
+    volumes:
+      - ${HOME}/ds_test/master-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/master-server/logs
+      - ${HOME}/ds_test/worker-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/worker-server/logs
+      - ${HOME}/ds_test/alert-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/alert-server/logs
+      - ${HOME}/ds_test/api-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/api-server/logs
     ports:
       - "12345:12345"
       - "5679:5679"

.github/workflows/cluster-test/mysql_with_zookeeper_registry/running_test.sh (18 changed lines)

@@ -24,7 +24,7 @@ WORKER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http:/
 ALERT_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:50053/actuator/health"
 #Cluster start health check
-TIMEOUT=180
+TIMEOUT=300
 START_HEALTHCHECK_EXITCODE=0
 for ((i=1; i<=TIMEOUT; i++))
@@ -46,23 +46,23 @@ do
   if [[ $i -eq $TIMEOUT ]];then
     if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/dolphinscheduler-master.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/*.out"
+      cat ~/ds_test/master-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/master-server/logs/*.out
       echo "master start health check failed"
     fi
     if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/dolphinscheduler-worker.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/*.out"
+      cat ~/ds_test/worker-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/worker-server/logs/*.out
       echo "worker start health check failed"
     fi
     if [[ $API_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/dolphinscheduler-api.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/*.out"
+      cat ~/ds_test/api-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/api-server/logs/*.out
       echo "api start health check failed"
     fi
     if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/dolphinscheduler-alert.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/*.out"
+      cat ~/ds_test/alert-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/alert-server/logs/*.out
       echo "alert start health check failed"
     fi
     exit $START_HEALTHCHECK_EXITCODE

.github/workflows/cluster-test/postgresql_with_postgresql_registry/docker-compose-cluster.yaml (5 changed lines)

@@ -22,6 +22,11 @@ services:
     container_name: ds
     image: jdk8:ds_postgresql_cluster
     restart: always
+    volumes:
+      - ${HOME}/ds_test/master-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/master-server/logs
+      - ${HOME}/ds_test/worker-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/worker-server/logs
+      - ${HOME}/ds_test/alert-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/alert-server/logs
+      - ${HOME}/ds_test/api-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/api-server/logs
     ports:
       - "12345:12345"
       - "5679:5679"

.github/workflows/cluster-test/postgresql_with_postgresql_registry/running_test.sh (45 changed lines)

@@ -24,7 +24,7 @@ WORKER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http:/
 ALERT_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:50053/actuator/health"
 #Cluster start health check
-TIMEOUT=180
+TIMEOUT=300
 START_HEALTHCHECK_EXITCODE=0
 for ((i=1; i<=TIMEOUT; i++))
@@ -45,29 +45,28 @@ do
   fi
   if [[ $i -eq $TIMEOUT ]];then
     if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/dolphinscheduler-master.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/*.out"
+      cat ~/ds_test/master-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/master-server/logs/*.out
       echo "master start health check failed"
-    fi
-    if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/dolphinscheduler-worker.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/*.out"
-      echo "worker start health check failed"
-    fi
-    if [[ $API_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/dolphinscheduler-api.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/*.out"
-      echo "api start health check failed"
-    fi
-    if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/dolphinscheduler-alert.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/*.out"
-      echo "alert start health check failed"
-    fi
-    exit $START_HEALTHCHECK_EXITCODE
     fi
+    if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
+      cat ~/ds_test/worker-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/worker-server/logs/*.out
+      echo "worker start health check failed"
+    fi
+    if [[ $API_HTTP_STATUS -ne 200 ]]; then
+      cat ~/ds_test/api-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/api-server/logs/*.out
+      echo "api start health check failed"
+    fi
+    if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
+      cat ~/ds_test/alert-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/alert-server/logs/*.out
+      echo "alert start health check failed"
+    fi
+    exit $START_HEALTHCHECK_EXITCODE
+  fi
   sleep 1
 done
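
Both postgresql variants receive the same functional change as the mysql scripts above; the larger line count here is just the hunk re-flowing the if/fi nesting. The probes themselves are plain curl calls against each server's actuator endpoint, with flags that decode as: -I sends a HEAD request, -m 10 caps the request at ten seconds, and -s -o /dev/null -w %{http_code} prints only the HTTP status code. A usage sketch built from the ALERT command shown above (the expansion line is an assumption about how the script invokes it):

    ALERT_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:50053/actuator/health"
    ALERT_HTTP_STATUS=$($ALERT_HEALTHCHECK_COMMAND)   # "200" once the alert server is healthy
    [[ $ALERT_HTTP_STATUS -eq 200 ]] && echo "alert server is up"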

.github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-base.yaml (6 changed lines)

@@ -34,7 +34,7 @@ services:
       retries: 120
   zoo1:
-    image: zookeeper:3.8.0
+    image: zookeeper:3.8
     restart: always
     hostname: zoo1
     ports:
@@ -44,7 +44,7 @@ services:
       ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
   zoo2:
-    image: zookeeper:3.8.0
+    image: zookeeper:3.8
     restart: always
     hostname: zoo2
     ports:
@@ -54,7 +54,7 @@ services:
       ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
   zoo3:
-    image: zookeeper:3.8.0
+    image: zookeeper:3.8
     restart: always
     hostname: zoo3
     ports:

.github/workflows/cluster-test/postgresql_with_zookeeper_registry/docker-compose-cluster.yaml (5 changed lines)

@@ -22,6 +22,11 @@ services:
     container_name: ds
     image: jdk8:ds_postgresql_cluster
     restart: always
+    volumes:
+      - ${HOME}/ds_test/master-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/master-server/logs
+      - ${HOME}/ds_test/worker-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/worker-server/logs
+      - ${HOME}/ds_test/alert-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/alert-server/logs
+      - ${HOME}/ds_test/api-server/logs:/root/apache-dolphinscheduler-test-SNAPSHOT-bin/api-server/logs
     ports:
       - "12345:12345"
       - "5679:5679"

.github/workflows/cluster-test/postgresql_with_zookeeper_registry/running_test.sh (45 changed lines)

@@ -24,7 +24,7 @@ WORKER_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http:/
 ALERT_HEALTHCHECK_COMMAND="curl -I -m 10 -o /dev/null -s -w %{http_code} http://0.0.0.0:50053/actuator/health"
 #Cluster start health check
-TIMEOUT=180
+TIMEOUT=300
 START_HEALTHCHECK_EXITCODE=0
 for ((i=1; i<=TIMEOUT; i++))
@@ -45,29 +45,28 @@ do
   fi
   if [[ $i -eq $TIMEOUT ]];then
     if [[ $MASTER_HTTP_STATUS -ne 200 ]];then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/dolphinscheduler-master.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/master-server/logs/*.out"
+      cat ~/ds_test/master-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/master-server/logs/*.out
       echo "master start health check failed"
-    fi
-    if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/dolphinscheduler-worker.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/worker-server/logs/*.out"
-      echo "worker start health check failed"
-    fi
-    if [[ $API_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/dolphinscheduler-api.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/api-server/logs/*.out"
-      echo "api start health check failed"
-    fi
-    if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/dolphinscheduler-alert.log"
-      docker exec -u root ds bash -c "cat /root/apache-dolphinscheduler-*-SNAPSHOT-bin/alert-server/logs/*.out"
-      echo "alert start health check failed"
-    fi
-    exit $START_HEALTHCHECK_EXITCODE
     fi
+    if [[ $WORKER_HTTP_STATUS -ne 200 ]]; then
+      cat ~/ds_test/worker-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/worker-server/logs/*.out
+      echo "worker start health check failed"
+    fi
+    if [[ $API_HTTP_STATUS -ne 200 ]]; then
+      cat ~/ds_test/api-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/api-server/logs/*.out
+      echo "api start health check failed"
+    fi
+    if [[ $ALERT_HTTP_STATUS -ne 200 ]]; then
+      cat ~/ds_test/alert-server/logs/dolphinscheduler-master.log
+      cat ~/ds_test/alert-server/logs/*.out
+      echo "alert start health check failed"
+    fi
+    exit $START_HEALTHCHECK_EXITCODE
+  fi
   sleep 1
 done

.github/workflows/e2e.yml (4 changed lines)

@@ -61,7 +61,7 @@ jobs:
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
       - name: Cache local Maven repository
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-e2e
@@ -148,7 +148,7 @@ jobs:
         with:
           comment_on_pr: false
       - name: Cache local Maven repository
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-e2e

.github/workflows/publish-docker.yaml (2 changed lines)

@@ -35,7 +35,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Cache local Maven repository
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-docker

.github/workflows/unit-test.yml (25 changed lines)

@@ -70,17 +70,29 @@ jobs:
         with:
           java-version: ${{ matrix.java }}
           distribution: 'adopt'
-      - uses: actions/cache@v3
+      - uses: actions/cache@v4
         with:
           path: ~/.m2/repository
           key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-unit-test
           restore-keys: ${{ runner.os }}-maven-
       - name: Run Unit tests
         run: ./mvnw clean verify -B -Dmaven.test.skip=false -Dspotless.skip=true -DskipUT=false
       - name: Upload coverage report to codecov
         run: CODECOV_TOKEN="09c2663f-b091-4258-8a47-c981827eb29a" bash <(curl -s https://codecov.io/bash)
+  sonarcloud-analysis:
+    name: SonarCloud-Analysis
+    needs: paths-filter
+    runs-on: ubuntu-latest
+    timeout-minutes: 30
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: true
+      - uses: actions/cache@v4
+        with:
+          path: ~/.m2/repository
+          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}-unit-test
+          restore-keys: ${{ runner.os }}-maven-
       # Set up JDK 17 for SonarCloud.
       - name: Set up JDK 17
         uses: actions/setup-java@v2
@@ -97,19 +109,18 @@ jobs:
           -Dsonar.organization=apache
           -Dsonar.core.codeCoveragePlugin=jacoco
           -Dsonar.projectKey=apache-dolphinscheduler
-          -Dsonar.login=e4058004bc6be89decf558ac819aa1ecbee57682
           -Dsonar.exclusions=,dolphinscheduler-ui/src/**/i18n/locale/*.js,dolphinscheduler-microbench/src/**/*
+          -Dsonar.token=e4058004bc6be89decf558ac819aa1ecbee57682
           -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.httpconnectionManager.ttlSeconds=120
           -DskipUT=true
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
   result:
     name: Unit Test
     runs-on: ubuntu-latest
     timeout-minutes: 30
-    needs: [ unit-test, paths-filter ]
+    needs: [ unit-test, paths-filter, sonarcloud-analysis ]
     if: always()
     steps:
       - name: Status
@@ -118,7 +129,7 @@ jobs:
           echo "Skip Unit Test!"
           exit 0
         fi
-        if [[ ${{ needs.unit-test.result }} != 'success' ]]; then
+        if [[ ${{ needs.unit-test.result }} != 'success' || ${{ needs.sonarcloud-analysis.result }} != 'success' ]]; then
           echo "Unit Test Failed!"
           exit -1
         fi
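
Splitting SonarCloud analysis out of the unit-test job lets the two run in parallel, so the aggregate result job must now gate on both: it adds sonarcloud-analysis to needs and checks both results, while if: always() keeps the gate running even when a dependency fails. (The -Dsonar.login to -Dsonar.token rename tracks SonarQube's deprecation of the sonar.login property.) A minimal sketch of this fan-in pattern, using the job names above:

    result:
      name: Unit Test
      runs-on: ubuntu-latest
      needs: [ unit-test, paths-filter, sonarcloud-analysis ]
      if: always()   # run the gate even if a needed job failed, then inspect results
      steps:
        - name: Status
          run: |
            if [[ ${{ needs.unit-test.result }} != 'success' || ${{ needs.sonarcloud-analysis.result }} != 'success' ]]; then
              echo "Unit Test Failed!"
              exit -1
            fi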

pom.xml (3 changed lines)

@@ -642,9 +642,6 @@
               <goals>
                 <goal>restore-instrumented-classes</goal>
               </goals>
-              <configuration>
-                <excludes>com/github/dreamhead/moco/*</excludes>
-              </configuration>
             </execution>
             <execution>
               <id>default-report</id>
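
For context, restore-instrumented-classes is the jacoco-maven-plugin goal that, in offline-instrumentation setups, puts the original class files back in place after tests run. Removing the <excludes> block means the com/github/dreamhead/moco/* classes are no longer special-cased. A hedged sketch of the execution as it reads after the change (the execution id is assumed, since the capture does not show it):

    <execution>
      <id>restore-instrumented-classes</id>  <!-- id assumed -->
      <goals>
        <goal>restore-instrumented-classes</goal>
      </goals>
      <!-- the <configuration> excluding com/github/dreamhead/moco/* was removed here -->
    </execution>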
