Browse Source

[1.3.6-prepare][Improvement][Docker] Add workdir for dockerfile and remove redundant container_name and dolphinscheduler-postgresql-initdb (#5224)

* [1.3.6-prepare][Improvement][Docker] Add workdir for dockerfile

* [1.3.6-prepare][Improvement][Docker] Remove redundant container_name and dolphinscheduler-postgresql-initdb

* [1.3.6-prepare][Improvement][Installation] Update description in install config
1.3.6-release
Shiwen Cheng 3 years ago committed by GitHub
parent
commit
22c1c0e65a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 4
      .github/workflows/ci_ut.yml
  2. 1
      docker/build/Dockerfile
  3. 8
      docker/docker-swarm/docker-compose.yml
  4. 2
      docker/kubernetes/dolphinscheduler/values.yaml
  5. 8
      dolphinscheduler-server/src/main/resources/config/install_config.conf

4
.github/workflows/ci_ut.yml

@@ -48,9 +48,9 @@ jobs:
- name: Bootstrap database
run: |
sed -i "s/: root/: test/g" $(pwd)/docker/docker-swarm/docker-compose.yml
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml up --no-start --force-recreate dolphinscheduler-zookeeper dolphinscheduler-postgresql
sudo cp $(pwd)/sql/dolphinscheduler_postgre.sql $(docker volume inspect docker-swarm_dolphinscheduler-postgresql-initdb | grep "Mountpoint" | awk -F "\"" '{print $4}')
docker-compose -f $(pwd)/docker/docker-swarm/docker-compose.yml up -d dolphinscheduler-zookeeper dolphinscheduler-postgresql
until docker logs docker-swarm_dolphinscheduler-postgresql_1 2>&1 | grep 'listening on IPv4 address'; do echo "wait for postgresql ..."; sleep 1; done
docker run --rm --network docker-swarm_dolphinscheduler -v $(pwd)/sql/dolphinscheduler_postgre.sql:/docker-entrypoint-initdb.d/dolphinscheduler_postgre.sql bitnami/postgresql:latest bash -c "PGPASSWORD=test psql -h docker-swarm_dolphinscheduler-postgresql_1 -U test -d dolphinscheduler -v ON_ERROR_STOP=1 -f /docker-entrypoint-initdb.d/dolphinscheduler_postgre.sql"
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:

1
docker/build/Dockerfile

@@ -43,6 +43,7 @@ RUN apt-get update && \
ADD ./apache-dolphinscheduler-${VERSION}-bin.tar.gz /opt/
RUN ln -s /opt/apache-dolphinscheduler-${VERSION}-bin /opt/dolphinscheduler
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
WORKDIR ${DOLPHINSCHEDULER_HOME}
# 3. add configuration and modify permissions and set soft links
COPY ./checkpoint.sh /root/checkpoint.sh

8
docker/docker-swarm/docker-compose.yml

@@ -20,7 +20,6 @@ services:
dolphinscheduler-postgresql:
image: bitnami/postgresql:latest
container_name: dolphinscheduler-postgresql
environment:
TZ: Asia/Shanghai
POSTGRESQL_USERNAME: root
@@ -28,14 +27,12 @@ services:
POSTGRESQL_DATABASE: dolphinscheduler
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
- dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d
restart: unless-stopped
networks:
- dolphinscheduler
dolphinscheduler-zookeeper:
image: bitnami/zookeeper:latest
container_name: dolphinscheduler-zookeeper
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
@@ -48,7 +45,6 @@ services:
dolphinscheduler-api:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-api
command: api-server
ports:
- 12345:12345
@@ -73,7 +69,6 @@ services:
dolphinscheduler-alert:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-alert
command: alert-server
environment:
TZ: Asia/Shanghai
@@ -93,7 +88,6 @@ services:
dolphinscheduler-master:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-master
command: master-server
environment:
TZ: Asia/Shanghai
@@ -115,7 +109,6 @@ services:
dolphinscheduler-worker:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-worker
command: worker-server
environment:
TZ: Asia/Shanghai
@@ -143,7 +136,6 @@ networks:
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-postgresql-initdb:
dolphinscheduler-zookeeper:
dolphinscheduler-worker-data:
dolphinscheduler-logs:

2
docker/kubernetes/dolphinscheduler/values.yaml

@@ -100,7 +100,7 @@ common:
HIVE_HOME: "/opt/soft/hive"
FLINK_HOME: "/opt/soft/flink"
DATAX_HOME: "/opt/soft/datax"
## Shared storage persistence mounted into master and worker, such as Hadoop, Spark, Flink and DataX binary package
## Shared storage persistence mounted into api, master and worker, such as Hadoop, Spark, Flink and DataX binary package
sharedStoragePersistence:
enabled: false
mountPath: "/opt/soft"

8
dolphinscheduler-server/src/main/resources/config/install_config.conf

@@ -74,7 +74,7 @@ sslEnable="false"
sslTrust="smtp.exmail.qq.com"
# resource storage type:HDFS,S3,NONE
# resource storage type: HDFS, S3, NONE
resourceStorageType="NONE"
# if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
@@ -87,13 +87,13 @@ s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"
# if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarnHaIps="192.168.xx.xx,192.168.xx.xx"
# if resourcemanager HA enable or not use resourcemanager, please skip this value setting; If resourcemanager is single, you only need to replace yarnIp1 to actual resourcemanager hostname.
# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
singleYarnIp="yarnIp1"
# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。/dolphinscheduler is recommended
# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resourceUploadPath="/dolphinscheduler"
# who have permissions to create directory under HDFS/S3 root path

Loading…
Cancel
Save