From 472723d7e69406d77da75b70fa2be423b8a5f3db Mon Sep 17 00:00:00 2001 From: BoYiZhang <39816903+BoYiZhang@users.noreply.github.com> Date: Wed, 8 Apr 2020 22:30:31 +0800 Subject: [PATCH 1/3] Fix document errors (#2382) * Remove invalid code * fix document errors Co-authored-by: zhanglong Co-authored-by: dailidong --- .../dolphinscheduler/api/controller/SchedulerController.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java index 96038dcf8c..d246a0f73a 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java @@ -76,7 +76,7 @@ public class SchedulerController extends BaseController { @ApiOperation(value = "createSchedule", notes= "CREATE_SCHEDULE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100"), - @ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "Int", example = "100"), + @ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"), @ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type ="WarningType"), @ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"), @ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type ="FailureStrategy"), @@ -132,7 +132,7 @@ public class SchedulerController extends BaseController { @ApiOperation(value = "updateSchedule", notes= "UPDATE_SCHEDULE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100"), - @ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "Int", example = "100"), + @ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"), @ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type ="WarningType"), @ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"), @ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type ="FailureStrategy"), From 57d47652fa8e00b7271706d2060fbe7f3f0de642 Mon Sep 17 00:00:00 2001 From: Zonglei Dong Date: Wed, 8 Apr 2020 23:39:56 +0800 Subject: [PATCH 2/3] Add maven-wrapper support for dolphinscheduler. (#2381) * add maven-wrapper support. * fixes maven build command to maven-wrapper. 
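With the wrapper committed, contributors can build without a locally installed Maven; the wrapper downloads Apache Maven 3.6.3 on first use, as configured in `.mvn/wrapper/maven-wrapper.properties`. A typical invocation, matching the README change in this patch, is:

```bash
# Build the release artifacts via the Maven wrapper instead of a local mvn;
# on first run this fetches maven-wrapper.jar and Apache Maven 3.6.3.
./mvnw clean install -Prelease

# On Windows, the equivalent wrapper script added by this patch is used instead:
# mvnw.cmd clean install -Prelease
```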
Co-authored-by: dailidong --- .mvn/jvm.config | 1 + .mvn/wrapper/MavenWrapperDownloader.java | 117 +++++++++ .mvn/wrapper/maven-wrapper.properties | 2 + README.md | 2 +- README_zh_CN.md | 2 +- mvnw | 310 +++++++++++++++++++++++ mvnw.cmd | 182 +++++++++++++ 7 files changed, 614 insertions(+), 2 deletions(-) create mode 100644 .mvn/jvm.config create mode 100644 .mvn/wrapper/MavenWrapperDownloader.java create mode 100644 .mvn/wrapper/maven-wrapper.properties create mode 100755 mvnw create mode 100644 mvnw.cmd diff --git a/.mvn/jvm.config b/.mvn/jvm.config new file mode 100644 index 0000000000..20be3f8273 --- /dev/null +++ b/.mvn/jvm.config @@ -0,0 +1 @@ +-Xmx1024m -XX:MaxMetaspaceSize=256m diff --git a/.mvn/wrapper/MavenWrapperDownloader.java b/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000..b901097f2d --- /dev/null +++ b/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,117 @@ +/* + * Copyright 2007-present the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.6"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. 
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if(mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if(mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if(!outputFile.getParentFile().exists()) { + if(!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000..642d572ce9 --- /dev/null +++ b/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar diff --git a/README.md b/README.md index ebd620efee..84f9ccfa66 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ Welcome to participate in contributing, please refer to the process of submittin ### How to Build ```bash -mvn clean install -Prelease +./mvnw clean install -Prelease ``` Artifact: diff --git a/README_zh_CN.md b/README_zh_CN.md index 6a4adc8daa..2c8aa11bf8 100644 --- a/README_zh_CN.md +++ b/README_zh_CN.md @@ -77,7 +77,7 @@ DolphinScheduler的工作计划: \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG="`dirname "$PRG"`/$link" + fi + done + + saveddir=`pwd` + + M2_HOME=`dirname "$PRG"`/.. 
+ + # make it fully qualified + M2_HOME=`cd "$M2_HOME" && pwd` + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --unix "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --unix "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --unix "$CLASSPATH"` +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$M2_HOME" ] && + M2_HOME="`(cd "$M2_HOME"; pwd)`" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="`which javac`" + if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=`which readlink` + if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then + if $darwin ; then + javaHome="`dirname \"$javaExecutable\"`" + javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" + else + javaExecutable="`readlink -f \"$javaExecutable\"`" + fi + javaHome="`dirname \"$javaExecutable\"`" + javaHome=`expr "$javaHome" : '\(.*\)/bin'` + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="`which java`" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=`cd "$wdir/.."; pwd` + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' < "$1")" + fi +} + +BASE_DIR=`find_maven_basedir "$(pwd)"` +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 
+ fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + fi + while IFS="=" read key value; do + case "$key" in (wrapperUrl) jarUrl="$value"; break ;; + esac + done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=`cygpath --path --windows "$wrapperJarPath"` + fi + + if command -v wget > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=`cygpath --path --windows "$javaClass"` + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=`cygpath --path --windows "$M2_HOME"` + [ -n "$JAVA_HOME" ] && + JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` + [ -n "$CLASSPATH" ] && + CLASSPATH=`cygpath --path --windows "$CLASSPATH"` + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. 
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/mvnw.cmd b/mvnw.cmd new file mode 100644 index 0000000000..86115719e5 --- /dev/null +++ b/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. 
>&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% From 294ec783411a83ec9d92030024f081e742529932 Mon Sep 17 00:00:00 2001 From: liwenhe1993 <32166572+liwenhe1993@users.noreply.github.com> Date: Thu, 9 Apr 2020 11:22:47 +0800 Subject: [PATCH 3/3] Refactor dockerfile (#2384) * Remove .helmignore file * Refactor dockerfile --- dockerfile/Dockerfile | 2 +- dockerfile/README.md | 24 ++-- dockerfile/README_zh_CN.md | 24 ++-- .../application-api.properties.tpl | 8 ++ .../application.properties.tpl | 115 ------------------ .../dolphinscheduler/common.properties.tpl | 76 ++++++------ .../datasource.properties.tpl | 71 +++++++++++ .../dolphinscheduler/master.properties.tpl | 40 ++++++ .../dolphinscheduler/quartz.properties.tpl | 47 ++++--- .../dolphinscheduler/worker.properties.tpl | 37 ++++++ .../dolphinscheduler/zookeeper.properties.tpl | 29 +++++ dockerfile/startup-init-conf.sh | 6 +- 12 files changed, 270 insertions(+), 209 deletions(-) delete mode 100644 dockerfile/conf/dolphinscheduler/application.properties.tpl create mode 100644 dockerfile/conf/dolphinscheduler/datasource.properties.tpl create mode 100644 dockerfile/conf/dolphinscheduler/master.properties.tpl create mode 100644 dockerfile/conf/dolphinscheduler/worker.properties.tpl create mode 100644 dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl diff --git a/dockerfile/Dockerfile b/dockerfile/Dockerfile index acd2cb99e5..c48b51e377 100644 --- a/dockerfile/Dockerfile +++ b/dockerfile/Dockerfile @@ -90,6 +90,6 @@ RUN chmod +x /root/checkpoint.sh && \ RUN rm -rf /var/cache/apk/* #9. expose port -EXPOSE 2181 2888 3888 5432 12345 50051 8888 +EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888 ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"] \ No newline at end of file diff --git a/dockerfile/README.md b/dockerfile/README.md index 60af7fad8f..b407f57d3b 100644 --- a/dockerfile/README.md +++ b/dockerfile/README.md @@ -162,18 +162,6 @@ This environment variable sets the runtime environment for task. The default val User data directory path, self configuration, please make sure the directory exists and have read write permissions. The default value is `/tmp/dolphinscheduler` -**`DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH`** - -Directory path for user data download. self configuration, please make sure the directory exists and have read write permissions. The default value is `/tmp/dolphinscheduler/download` - -**`DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH`** - -Process execute directory. self configuration, please make sure the directory exists and have read write permissions. The default value is `/tmp/dolphinscheduler/exec` - -**`TASK_QUEUE`** - -This environment variable sets the task queue for `master-server` and `worker-serverr`. The default value is `zookeeper`. 
- **`ZOOKEEPER_QUORUM`** This environment variable sets zookeeper quorum for `master-server` and `worker-serverr`. The default value is `127.0.0.1:2181`. @@ -208,6 +196,10 @@ This environment variable sets max cpu load avg for `master-server`. The default This environment variable sets reserved memory for `master-server`. The default value is `0.1`. +**`MASTER_LISTEN_PORT`** + +This environment variable sets port for `master-server`. The default value is `5678`. + **`WORKER_EXEC_THREADS`** This environment variable sets exec thread num for `worker-server`. The default value is `100`. @@ -228,6 +220,14 @@ This environment variable sets max cpu load avg for `worker-server`. The default This environment variable sets reserved memory for `worker-server`. The default value is `0.1`. +**`WORKER_LISTEN_PORT`** + +This environment variable sets port for `worker-server`. The default value is `1234`. + +**`WORKER_GROUP`** + +This environment variable sets group for `worker-server`. The default value is `default`. + **`XLS_FILE_PATH`** This environment variable sets xls file path for `alert-server`. The default value is `/tmp/xls`. diff --git a/dockerfile/README_zh_CN.md b/dockerfile/README_zh_CN.md index 900c8b50d9..187261581d 100644 --- a/dockerfile/README_zh_CN.md +++ b/dockerfile/README_zh_CN.md @@ -162,18 +162,6 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些 用户数据目录, 用户自己配置, 请确保这个目录存在并且用户读写权限, 默认值 `/tmp/dolphinscheduler`。 -**`DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH`** - -用户数据下载目录, 用户自己配置, 请确保这个目录存在并且用户读写权限, 默认值 `/tmp/dolphinscheduler/download`。 - -**`DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH`** - -任务执行目录, 用户自己配置, 请确保这个目录存在并且用户读写权限, 默认值 `/tmp/dolphinscheduler/exec`。 - -**`TASK_QUEUE`** - -配置`master-server`和`worker-serverr`的`Zookeeper`任务队列名, 默认值 `zookeeper`。 - **`ZOOKEEPER_QUORUM`** 配置`master-server`和`worker-serverr`的`Zookeeper`地址, 默认值 `127.0.0.1:2181`。 @@ -208,6 +196,10 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些 配置`master-server`的保留内存,默认值 `0.1`。 +**`MASTER_LISTEN_PORT`** + +配置`master-server`的端口,默认值 `5678`。 + **`WORKER_EXEC_THREADS`** 配置`worker-server`中的执行线程数量,默认值 `100`。 @@ -228,6 +220,14 @@ Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些 配置`worker-server`的保留内存,默认值 `0.1`。 +**`WORKER_LISTEN_PORT`** + +配置`worker-server`的端口,默认值 `1234`。 + +**`WORKER_GROUP`** + +配置`worker-server`的分组,默认值 `default`。 + **`XLS_FILE_PATH`** 配置`alert-server`的`XLS`文件的存储路径,默认值 `/tmp/xls`。 diff --git a/dockerfile/conf/dolphinscheduler/application-api.properties.tpl b/dockerfile/conf/dolphinscheduler/application-api.properties.tpl index 424ea55f7d..88915923fa 100644 --- a/dockerfile/conf/dolphinscheduler/application-api.properties.tpl +++ b/dockerfile/conf/dolphinscheduler/application-api.properties.tpl @@ -14,21 +14,29 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# + # server port server.port=12345 + # session config server.servlet.session.timeout=7200 + # servlet config server.servlet.context-path=/dolphinscheduler/ + # file size limit for upload spring.servlet.multipart.max-file-size=1024MB spring.servlet.multipart.max-request-size=1024MB + # post content server.jetty.max-http-post-size=5000000 + # i18n spring.messages.encoding=UTF-8 + #i18n classpath folder , file prefix messages, if have many files, use "," seperator spring.messages.basename=i18n/messages + # Authentication types (supported types: PASSWORD) security.authentication.type=PASSWORD diff --git a/dockerfile/conf/dolphinscheduler/application.properties.tpl b/dockerfile/conf/dolphinscheduler/application.properties.tpl deleted file mode 100644 index 6dd8a18e11..0000000000 --- a/dockerfile/conf/dolphinscheduler/application.properties.tpl +++ /dev/null @@ -1,115 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# base spring data source configuration -spring.datasource.type=com.alibaba.druid.pool.DruidDataSource -# postgre -spring.datasource.driver-class-name=org.postgresql.Driver -spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8 -# mysql -#spring.datasource.driver-class-name=com.mysql.jdbc.Driver -#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8 -spring.datasource.username=${POSTGRESQL_USERNAME} -spring.datasource.password=${POSTGRESQL_PASSWORD} -# connection configuration -spring.datasource.initialSize=5 -# min connection number -spring.datasource.minIdle=5 -# max connection number -spring.datasource.maxActive=50 -# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases. -# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true. -spring.datasource.maxWait=60000 -# milliseconds for check to close free connections -spring.datasource.timeBetweenEvictionRunsMillis=60000 -# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis. -spring.datasource.timeBetweenConnectErrorMillis=60000 -# the longest time a connection remains idle without being evicted, in milliseconds -spring.datasource.minEvictableIdleTimeMillis=300000 -#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work. 
-spring.datasource.validationQuery=SELECT 1 -#check whether the connection is valid for timeout, in seconds -spring.datasource.validationQueryTimeout=3 -# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis, -# validation Query is performed to check whether the connection is valid -spring.datasource.testWhileIdle=true -#execute validation to check if the connection is valid when applying for a connection -spring.datasource.testOnBorrow=true -#execute validation to check if the connection is valid when the connection is returned -spring.datasource.testOnReturn=false -spring.datasource.defaultAutoCommit=true -spring.datasource.keepAlive=true -# open PSCache, specify count PSCache for every connection -spring.datasource.poolPreparedStatements=true -spring.datasource.maxPoolPreparedStatementPerConnectionSize=20 -spring.datasource.spring.datasource.filters=stat,wall,log4j -spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000 - -#mybatis -mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml -mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums -#Entity scan, where multiple packages are separated by a comma or semicolon -mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity -#Primary key type AUTO:" database ID AUTO ", INPUT:" user INPUT ID", ID_WORKER:" global unique ID (numeric type unique ID)", UUID:" global unique ID UUID"; -mybatis-plus.global-config.db-config.id-type=AUTO -#Field policy IGNORED:" ignore judgment ",NOT_NULL:" not NULL judgment "),NOT_EMPTY:" not NULL judgment" -mybatis-plus.global-config.db-config.field-strategy=NOT_NULL -#The hump underline is converted -mybatis-plus.global-config.db-config.column-underline=true -mybatis-plus.global-config.db-config.logic-delete-value=-1 -mybatis-plus.global-config.db-config.logic-not-delete-value=0 -mybatis-plus.global-config.db-config.banner=false -#The original configuration -mybatis-plus.configuration.map-underscore-to-camel-case=true -mybatis-plus.configuration.cache-enabled=false -mybatis-plus.configuration.call-setters-on-nulls=true -mybatis-plus.configuration.jdbc-type-for-null=null - -# master settings -# master execute thread num -master.exec.threads=${MASTER_EXEC_THREADS} -# master execute task number in parallel -master.exec.task.num=${MASTER_EXEC_TASK_NUM} -# master heartbeat interval -master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL} -# master commit task retry times -master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES} -# master commit task interval -master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL} -# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 -master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG} -# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G. -master.reserved.memory=${MASTER_RESERVED_MEMORY} - -# worker settings -# worker execute thread num -worker.exec.threads=${WORKER_EXEC_THREADS} -# worker heartbeat interval -worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL} -# submit the number of tasks at a time -worker.fetch.task.num=${WORKER_FETCH_TASK_NUM} -# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 -worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG} -# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G. 
-worker.reserved.memory=${WORKER_RESERVED_MEMORY} - -# data quality analysis is not currently in use. please ignore the following configuration -# task record -task.record.flag=false -task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8 -task.record.datasource.username=xx -task.record.datasource.password=xx diff --git a/dockerfile/conf/dolphinscheduler/common.properties.tpl b/dockerfile/conf/dolphinscheduler/common.properties.tpl index 8134fc7a9b..f318ff8414 100644 --- a/dockerfile/conf/dolphinscheduler/common.properties.tpl +++ b/dockerfile/conf/dolphinscheduler/common.properties.tpl @@ -15,70 +15,64 @@ # limitations under the License. # -#task queue implementation, default "zookeeper" -dolphinscheduler.queue.impl=${TASK_QUEUE} - -#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 -zookeeper.quorum=${ZOOKEEPER_QUORUM} -#dolphinscheduler root directory -zookeeper.dolphinscheduler.root=/dolphinscheduler -#dolphinscheduler failover directory -zookeeper.session.timeout=300 -zookeeper.connection.timeout=300 -zookeeper.retry.base.sleep=100 -zookeeper.retry.max.sleep=30000 -zookeeper.retry.maxtime=5 - #============================================================================ # System #============================================================================ # system env path. self configuration, please make sure the directory and file exists and have read write execute permissions dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH} -#resource.view.suffixs -resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties -# is development state? default "false" -development.state=true + # user data directory path, self configuration, please make sure the directory exists and have read write permissions data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH} -# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions -data.download.basedir.path=${DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH} -# process execute directory. 
self configuration, please make sure the directory exists and have read write permissions -process.exec.basepath=${DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH} # resource upload startup type : HDFS,S3,NONE -res.upload.startup.type=NONE +resource.storage.type=NONE #============================================================================ # HDFS #============================================================================ -# Users who have permission to create directories under the HDFS root path -hdfs.root.user=hdfs -# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended -data.store2hdfs.basepath=/dolphinscheduler +# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended +#resource.upload.path=/dolphinscheduler + # whether kerberos starts -hadoop.security.authentication.startup.state=false +#hadoop.security.authentication.startup.state=false + # java.security.krb5.conf path -java.security.krb5.conf.path=/opt/krb5.conf +#java.security.krb5.conf.path=/opt/krb5.conf + # loginUserFromKeytab user -login.user.keytab.username=hdfs-mycluster@ESZ.COM +#login.user.keytab.username=hdfs-mycluster@ESZ.COM + # loginUserFromKeytab path -login.user.keytab.path=/opt/hdfs.headless.keytab +#login.user.keytab.path=/opt/hdfs.headless.keytab + +#resource.view.suffixs +#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties + +# if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path +hdfs.root.user=hdfs + +# kerberos expire time +kerberos.expire.time=7 #============================================================================ # S3 #============================================================================ -# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml -# to the conf directory,support s3,for example : s3a://dolphinscheduler +# if resource.storage.type=S3,the value like: s3a://dolphinscheduler ; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir fs.defaultFS=hdfs://mycluster:8020 -# s3 need,s3 endpoint -fs.s3a.endpoint=http://192.168.199.91:9010 -# s3 need,s3 access key -fs.s3a.access.key=A3DXS30FO22544RE -# s3 need,s3 secret key -fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK -#resourcemanager ha note this need ips , this empty if single + +# if resource.storage.type=S3,s3 endpoint +#fs.s3a.endpoint=http://192.168.199.91:9010 + +# if resource.storage.type=S3,s3 access key +#fs.s3a.access.key=A3DXS30FO22544RE + +# if resource.storage.type=S3,s3 secret key +#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK + +# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty TODO yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx -# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine + +# If resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname. 
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s diff --git a/dockerfile/conf/dolphinscheduler/datasource.properties.tpl b/dockerfile/conf/dolphinscheduler/datasource.properties.tpl new file mode 100644 index 0000000000..aefb9e3b0b --- /dev/null +++ b/dockerfile/conf/dolphinscheduler/datasource.properties.tpl @@ -0,0 +1,71 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# mysql +#spring.datasource.driver-class-name=com.mysql.jdbc.Driver +#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8 + +# postgre +spring.datasource.driver-class-name=org.postgresql.Driver +spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8 +spring.datasource.username=${POSTGRESQL_USERNAME} +spring.datasource.password=${POSTGRESQL_PASSWORD} + +## base spring data source configuration todo need to remove +#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource + +# connection configuration +#spring.datasource.initialSize=5 +# min connection number +#spring.datasource.minIdle=5 +# max connection number +#spring.datasource.maxActive=50 + +# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases. +# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true. +#spring.datasource.maxWait=60000 + +# milliseconds for check to close free connections +#spring.datasource.timeBetweenEvictionRunsMillis=60000 + +# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis. +#spring.datasource.timeBetweenConnectErrorMillis=60000 + +# the longest time a connection remains idle without being evicted, in milliseconds +#spring.datasource.minEvictableIdleTimeMillis=300000 + +#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work. 
+#spring.datasource.validationQuery=SELECT 1 + +#check whether the connection is valid for timeout, in seconds +#spring.datasource.validationQueryTimeout=3 + +# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis, +# validation Query is performed to check whether the connection is valid +#spring.datasource.testWhileIdle=true + +#execute validation to check if the connection is valid when applying for a connection +#spring.datasource.testOnBorrow=true +#execute validation to check if the connection is valid when the connection is returned +#spring.datasource.testOnReturn=false +#spring.datasource.defaultAutoCommit=true +#spring.datasource.keepAlive=true + +# open PSCache, specify count PSCache for every connection +#spring.datasource.poolPreparedStatements=true +#spring.datasource.maxPoolPreparedStatementPerConnectionSize=20 \ No newline at end of file diff --git a/dockerfile/conf/dolphinscheduler/master.properties.tpl b/dockerfile/conf/dolphinscheduler/master.properties.tpl new file mode 100644 index 0000000000..17dd6f9d69 --- /dev/null +++ b/dockerfile/conf/dolphinscheduler/master.properties.tpl @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# master execute thread num +master.exec.threads=${MASTER_EXEC_THREADS} + +# master execute task number in parallel +master.exec.task.num=${MASTER_EXEC_TASK_NUM} + +# master heartbeat interval +master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL} + +# master commit task retry times +master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES} + +# master commit task interval +master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL} + +# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 +master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG} + +# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G. 
+master.reserved.memory=${MASTER_RESERVED_MEMORY} + +# master listen port +#master.listen.port=${MASTER_LISTEN_PORT} \ No newline at end of file diff --git a/dockerfile/conf/dolphinscheduler/quartz.properties.tpl b/dockerfile/conf/dolphinscheduler/quartz.properties.tpl index 7c7c92e425..25645795bb 100644 --- a/dockerfile/conf/dolphinscheduler/quartz.properties.tpl +++ b/dockerfile/conf/dolphinscheduler/quartz.properties.tpl @@ -19,39 +19,36 @@ # Configure Main Scheduler Properties #============================================================================ #org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate -org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate -# postgre -org.quartz.dataSource.myDs.driver = org.postgresql.Driver -org.quartz.dataSource.myDs.URL = jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8 -org.quartz.dataSource.myDs.user = ${POSTGRESQL_USERNAME} -org.quartz.dataSource.myDs.password = ${POSTGRESQL_PASSWORD} -org.quartz.scheduler.instanceName = DolphinScheduler -org.quartz.scheduler.instanceId = AUTO -org.quartz.scheduler.makeSchedulerThreadDaemon = true -org.quartz.jobStore.useProperties = false +#org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate + +#org.quartz.scheduler.instanceName = DolphinScheduler +#org.quartz.scheduler.instanceId = AUTO +#org.quartz.scheduler.makeSchedulerThreadDaemon = true +#org.quartz.jobStore.useProperties = false #============================================================================ # Configure ThreadPool #============================================================================ -org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool -org.quartz.threadPool.makeThreadsDaemons = true -org.quartz.threadPool.threadCount = 25 -org.quartz.threadPool.threadPriority = 5 + +#org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool +#org.quartz.threadPool.makeThreadsDaemons = true +#org.quartz.threadPool.threadCount = 25 +#org.quartz.threadPool.threadPriority = 5 #============================================================================ # Configure JobStore #============================================================================ -org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX -org.quartz.jobStore.tablePrefix = QRTZ_ -org.quartz.jobStore.isClustered = true -org.quartz.jobStore.misfireThreshold = 60000 -org.quartz.jobStore.clusterCheckinInterval = 5000 -org.quartz.jobStore.acquireTriggersWithinLock=true -org.quartz.jobStore.dataSource = myDs + +#org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX + +#org.quartz.jobStore.tablePrefix = QRTZ_ +#org.quartz.jobStore.isClustered = true +#org.quartz.jobStore.misfireThreshold = 60000 +#org.quartz.jobStore.clusterCheckinInterval = 5000 +#org.quartz.jobStore.acquireTriggersWithinLock=true +#org.quartz.jobStore.dataSource = myDs #============================================================================ -# Configure Datasources +# Configure Datasources #============================================================================ -org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider -org.quartz.dataSource.myDs.maxConnections = 10 -org.quartz.dataSource.myDs.validationQuery = select 1 \ No newline at end of file +#org.quartz.dataSource.myDs.connectionProvider.class = 
org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider \ No newline at end of file diff --git a/dockerfile/conf/dolphinscheduler/worker.properties.tpl b/dockerfile/conf/dolphinscheduler/worker.properties.tpl new file mode 100644 index 0000000000..d596be94bc --- /dev/null +++ b/dockerfile/conf/dolphinscheduler/worker.properties.tpl @@ -0,0 +1,37 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# worker execute thread num +worker.exec.threads=${WORKER_EXEC_THREADS} + +# worker heartbeat interval +worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL} + +# submit the number of tasks at a time +worker.fetch.task.num=${WORKER_FETCH_TASK_NUM} + +# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 +worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG} + +# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G. +worker.reserved.memory=${WORKER_RESERVED_MEMORY} + +# worker listener port +#worker.listen.port=${WORKER_LISTEN_PORT} + +# default worker group +#worker.group=${WORKER_GROUP} \ No newline at end of file diff --git a/dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl b/dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl new file mode 100644 index 0000000000..a0ef72dc8f --- /dev/null +++ b/dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl @@ -0,0 +1,29 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# zookeeper cluster. multiple are separated by commas. eg. 
192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181 +zookeeper.quorum=${ZOOKEEPER_QUORUM} + +# dolphinscheduler root directory +#zookeeper.dolphinscheduler.root=/dolphinscheduler + +# dolphinscheduler failover directory +#zookeeper.session.timeout=300 +#zookeeper.connection.timeout=300 +#zookeeper.retry.base.sleep=100 +#zookeeper.retry.max.sleep=30000 +#zookeeper.retry.maxtime=5 \ No newline at end of file diff --git a/dockerfile/startup-init-conf.sh b/dockerfile/startup-init-conf.sh index d2b50fa3e7..da6eb21b7d 100644 --- a/dockerfile/startup-init-conf.sh +++ b/dockerfile/startup-init-conf.sh @@ -35,13 +35,10 @@ export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-"dolphinscheduler"} #============================================================================ export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"} export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"} -export DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH:-"/tmp/dolphinscheduler/download"} -export DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH=${DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH:-"/tmp/dolphinscheduler/exec"} #============================================================================ # Zookeeper #============================================================================ -export TASK_QUEUE=${TASK_QUEUE:-"zookeeper"} export ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"127.0.0.1:2181"} #============================================================================ @@ -54,6 +51,7 @@ export MASTER_TASK_COMMIT_RETRYTIMES=${MASTER_TASK_COMMIT_RETRYTIMES:-"5"} export MASTER_TASK_COMMIT_INTERVAL=${MASTER_TASK_COMMIT_INTERVAL:-"1000"} export MASTER_MAX_CPULOAD_AVG=${MASTER_MAX_CPULOAD_AVG:-"100"} export MASTER_RESERVED_MEMORY=${MASTER_RESERVED_MEMORY:-"0.1"} +export MASTER_LISTEN_PORT=${MASTER_LISTEN_PORT:-"5678"} #============================================================================ # Worker Server @@ -63,6 +61,8 @@ export WORKER_HEARTBEAT_INTERVAL=${WORKER_HEARTBEAT_INTERVAL:-"10"} export WORKER_FETCH_TASK_NUM=${WORKER_FETCH_TASK_NUM:-"3"} export WORKER_MAX_CPULOAD_AVG=${WORKER_MAX_CPULOAD_AVG:-"100"} export WORKER_RESERVED_MEMORY=${WORKER_RESERVED_MEMORY:-"0.1"} +export WORKER_LISTEN_PORT=${WORKER_LISTEN_PORT:-"1234"} +export WORKER_GROUP=${WORKER_GROUP:-"default"} #============================================================================ # Alert Server
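The `*.properties.tpl` templates added in this patch only take effect after the variables exported by `startup-init-conf.sh` are substituted into them. A minimal sketch of that rendering step, assuming `envsubst` is available in the image (the actual script may use a different substitution mechanism), looks like this:

```bash
#!/bin/sh
# Sketch only: render each *.properties.tpl under the conf directory into a
# concrete properties file by expanding the ${VAR} placeholders with the
# environment variables exported above. The paths and the use of envsubst
# are illustrative assumptions, not taken from this patch.
DOLPHINSCHEDULER_HOME=${DOLPHINSCHEDULER_HOME:-"/opt/dolphinscheduler"}
for tpl in "${DOLPHINSCHEDULER_HOME}"/conf/*.tpl; do
    envsubst < "${tpl}" > "${tpl%.tpl}"
done
```

For example, with `MASTER_LISTEN_PORT=5678` exported, the `master.properties.tpl` line `#master.listen.port=${MASTER_LISTEN_PORT}` would be rendered as `#master.listen.port=5678` in the generated `master.properties`.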