diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json b/ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json new file mode 100644 index 0000000000..2a9800e59a --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json @@ -0,0 +1,164 @@ +{ + "DOLPHIN": { + "service": [], + "DOLPHIN_API": [ + { + "name": "dolphin_api_port_check", + "label": "dolphin_api_port_check", + "description": "dolphin_api_port_check.", + "interval": 10, + "scope": "ANY", + "source": { + "type": "PORT", + "uri": "{{dolphin-application-api/server.port}}", + "default_port": 12345, + "reporting": { + "ok": { + "text": "TCP OK - {0:.3f}s response on port {1}" + }, + "warning": { + "text": "TCP OK - {0:.3f}s response on port {1}", + "value": 1.5 + }, + "critical": { + "text": "Connection failed: {0} to {1}:{2}", + "value": 5.0 + } + } + } + } + ], + "DOLPHIN_MASTER": [ + { + "name": "DOLPHIN_MASTER_CHECK", + "label": "check dolphin scheduler master status", + "description": "", + "interval":10, + "scope": "HOST", + "enabled": true, + "source": { + "type": "SCRIPT", + "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py", + "parameters": [ + + { + "name": "connection.timeout", + "display_name": "Connection Timeout", + "value": 5.0, + "type": "NUMERIC", + "description": "The maximum time before this alert is considered to be CRITICAL", + "units": "seconds", + "threshold": "CRITICAL" + }, + { + "name": "alertName", + "display_name": "alertName", + "value": "DOLPHIN_MASTER", + "type": "STRING", + "description": "alert name" + } + ] + } + } + ], + "DOLPHIN_WORKER": [ + { + "name": "DOLPHIN_WORKER_CHECK", + "label": "check dolphin scheduler worker status", + "description": "", + "interval":10, + "scope": "HOST", + "enabled": true, + "source": { + "type": "SCRIPT", + "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py", + "parameters": [ + + { + "name": "connection.timeout", + "display_name": "Connection Timeout", + "value": 5.0, + 
"type": "NUMERIC", + "description": "The maximum time before this alert is considered to be CRITICAL", + "units": "seconds", + "threshold": "CRITICAL" + }, + { + "name": "alertName", + "display_name": "alertName", + "value": "DOLPHIN_WORKER", + "type": "STRING", + "description": "alert name" + } + ] + } + } + ], + "DOLPHIN_ALERT": [ + { + "name": "DOLPHIN_DOLPHIN_ALERT_CHECK", + "label": "check dolphin scheduler alert status", + "description": "", + "interval":10, + "scope": "HOST", + "enabled": true, + "source": { + "type": "SCRIPT", + "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py", + "parameters": [ + + { + "name": "connection.timeout", + "display_name": "Connection Timeout", + "value": 5.0, + "type": "NUMERIC", + "description": "The maximum time before this alert is considered to be CRITICAL", + "units": "seconds", + "threshold": "CRITICAL" + }, + { + "name": "alertName", + "display_name": "alertName", + "value": "DOLPHIN_ALERT", + "type": "STRING", + "description": "alert name" + } + ] + } + } + ], + "DOLPHIN_ALERT": [ + { + "name": "DOLPHIN_DOLPHIN_LOGGER_CHECK", + "label": "check dolphin scheduler alert status", + "description": "", + "interval":10, + "scope": "HOST", + "enabled": true, + "source": { + "type": "SCRIPT", + "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py", + "parameters": [ + + { + "name": "connection.timeout", + "display_name": "Connection Timeout", + "value": 5.0, + "type": "NUMERIC", + "description": "The maximum time before this alert is considered to be CRITICAL", + "units": "seconds", + "threshold": "CRITICAL" + }, + { + "name": "alertName", + "display_name": "alertName", + "value": "DOLPHIN_LOGGER", + "type": "STRING", + "description": "alert name" + } + ] + } + } + ] + } +} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml new file mode 100644 
index 0000000000..5f44a1a4c8 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-alert.xml @@ -0,0 +1,143 @@ + + + + alert.type + EMAIL + alert type is EMAIL/SMS + + + + alert.template + html + alter msg template, default is html template + + + + mail.protocol + SMTP + + + + + mail.server.host + xxx.xxx.com + + + + + mail.server.port + 25 + + int + + + + + + mail.sender + admin + + + + + mail.user + admin + + + + + mail.passwd + 000000 + + PASSWORD + + password + + + + + + mail.smtp.starttls.enable + true + + boolean + + + + + + mail.smtp.ssl.enable + true + + boolean + + + + + + mail.smtp.ssl.trust + xxx.xxx.com + + + + + + enterprise.wechat.enable + false + + + value-list + + + true + + + + false + + + + 1 + + + + + enterprise.wechat.corp.id + wechatId + + + + + enterprise.wechat.secret + secret + + + + + enterprise.wechat.agent.id + agentId + + + + + enterprise.wechat.users + wechatUsers + + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-application-api.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-application-api.xml new file mode 100644 index 0000000000..766c0f477d --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-application-api.xml @@ -0,0 +1,87 @@ + + + + server.port + 12345 + + server port + + + int + + + + server.servlet.session.timeout + 7200 + + int + + + + + + server.servlet.context-path + /dolphinscheduler/ + + + + + spring.servlet.multipart.max-file-size + 1024 + + MB + int + + + + + + spring.servlet.multipart.max-request-size + 1024 + + MB + int + + + + + + server.jetty.max-http-post-size + 5000000 + + int + + + + + + spring.messages.encoding + UTF-8 + + + + spring.messages.basename + i18n/messages + + + + security.authentication.type + PASSWORD + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-common.xml 
b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-common.xml new file mode 100644 index 0000000000..439e21188a --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-common.xml @@ -0,0 +1,158 @@ + + + + resource.storage.type + Choose Resource Upload Startup Type + + Resource upload startup type : HDFS,S3,NONE + + NONE + + value-list + + + HDFS + + + + S3 + + + + NONE + + + + 1 + + + + + resource.upload.path + /dolphinscheduler + + resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended + + + + + data.basedir.path + /tmp/dolphinscheduler + + user data local directory path, please make sure the directory exists and have read write permissions + + + + + + hadoop.security.authentication.startup.state + false + + value-list + + + true + + + + false + + + + 1 + + whether kerberos starts + + + java.security.krb5.conf.path + /opt/krb5.conf + + java.security.krb5.conf path + + + + + login.user.keytab.username + hdfs-mycluster@ESZ.COM + + LoginUserFromKeytab user + + + + + login.user.keytab.path + /opt/hdfs.headless.keytab + + LoginUserFromKeytab path + + + + + resource.view.suffixs + txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties + + + + hdfs.root.user + hdfs + + Users who have permission to create directories under the HDFS root path + + + + + fs.defaultFS + hdfs://mycluster:8020 + + HA or single namenode, + If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory, + support s3,for example : s3a://dolphinscheduler + + + + + fs.s3a.endpoint + http://host:9010 + + s3 need,s3 endpoint + + + + + fs.s3a.access.key + A3DXS30FO22544RE + + s3 need,s3 access key + + + + + fs.s3a.secret.key + OloCLq3n+8+sdPHUhJ21XrSxTC+JK + + s3 need,s3 secret key + + + + + kerberos.expire.time + 7 + + + + \ No newline at end of file diff --git 
a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml new file mode 100644 index 0000000000..6e50a1b649 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-datasource.xml @@ -0,0 +1,467 @@ + + + + spring.datasource.initialSize + 5 + + Init connection number + + + int + + + + + spring.datasource.minIdle + 5 + + Min connection number + + + int + + + + + spring.datasource.maxActive + 50 + + Max connection number + + + int + + + + + spring.datasource.maxWait + 60000 + + Max wait time for get a connection in milliseconds. + If configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases. + If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true. + + + int + + + + + spring.datasource.timeBetweenEvictionRunsMillis + 60000 + + Milliseconds for check to close free connections + + + int + + + + + spring.datasource.timeBetweenConnectErrorMillis + 60000 + + The Destroy thread detects the connection interval and closes the physical connection in milliseconds + if the connection idle time is greater than or equal to minEvictableIdleTimeMillis. + + + int + + + + + spring.datasource.minEvictableIdleTimeMillis + 300000 + + The longest time a connection remains idle without being evicted, in milliseconds + + + int + + + + + spring.datasource.validationQuery + SELECT 1 + + The SQL used to check whether the connection is valid requires a query statement. + If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work. 
+ + + + + spring.datasource.validationQueryTimeout + 3 + + int + + + Check whether the connection is valid for timeout, in seconds + + + + + spring.datasource.testWhileIdle + true + + boolean + + + When applying for a connection, + if it is detected that the connection is idle longer than time Between Eviction Runs Millis, + validation Query is performed to check whether the connection is valid + + + + + spring.datasource.testOnBorrow + true + + boolean + + + Execute validation to check if the connection is valid when applying for a connection + + + + + spring.datasource.testOnReturn + false + + boolean + + + Execute validation to check if the connection is valid when the connection is returned + + + + + spring.datasource.defaultAutoCommit + true + + boolean + + + + + + + spring.datasource.keepAlive + false + + boolean + + + + + + + + spring.datasource.poolPreparedStatements + true + + boolean + + + Open PSCache, specify count PSCache for every connection + + + + + spring.datasource.maxPoolPreparedStatementPerConnectionSize + 20 + + int + + + + + + spring.datasource.spring.datasource.filters + stat,wall,log4j + + + + + spring.datasource.connectionProperties + druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000 + + + + + + mybatis-plus.mapper-locations + classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml + + + + + mybatis-plus.typeEnumsPackage + org.apache.dolphinscheduler.*.enums + + + + + mybatis-plus.typeAliasesPackage + org.apache.dolphinscheduler.dao.entity + + Entity scan, where multiple packages are separated by a comma or semicolon + + + + + mybatis-plus.global-config.db-config.id-type + AUTO + + value-list + + + AUTO + + + + INPUT + + + + ID_WORKER + + + + UUID + + + + 1 + + + Primary key type AUTO:" database ID AUTO ", + INPUT:" user INPUT ID", + ID_WORKER:" global unique ID (numeric type unique ID)", + UUID:" global unique ID UUID"; + + + + + mybatis-plus.global-config.db-config.field-strategy + NOT_NULL + + value-list + + + IGNORED + + + + 
NOT_NULL + + + + NOT_EMPTY + + + + 1 + + + Field policy IGNORED:" ignore judgment ", + NOT_NULL:" not NULL judgment "), + NOT_EMPTY:" not NULL judgment" + + + + + mybatis-plus.global-config.db-config.column-underline + true + + boolean + + + + + + mybatis-plus.global-config.db-config.logic-delete-value + 1 + + int + + + + + + mybatis-plus.global-config.db-config.logic-not-delete-value + 0 + + int + + + + + + mybatis-plus.global-config.db-config.banner + true + + boolean + + + + + + + mybatis-plus.configuration.map-underscore-to-camel-case + true + + boolean + + + + + + mybatis-plus.configuration.cache-enabled + false + + boolean + + + + + + mybatis-plus.configuration.call-setters-on-nulls + true + + boolean + + + + + + mybatis-plus.configuration.jdbc-type-for-null + null + + + + + master.exec.threads + 100 + + int + + + + + + master.exec.task.num + 20 + + int + + + + + + master.heartbeat.interval + 10 + + int + + + + + + master.task.commit.retryTimes + 5 + + int + + + + + + master.task.commit.interval + 1000 + + int + + + + + + master.max.cpuload.avg + 100 + + int + + + + + + master.reserved.memory + 0.1 + + float + + + + + + worker.exec.threads + 100 + + int + + + + + + worker.heartbeat.interval + 10 + + int + + + + + + worker.fetch.task.num + 3 + + int + + + + + + worker.max.cpuload.avg + 100 + + int + + + + + + worker.reserved.memory + 0.1 + + float + + + + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-env.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-env.xml new file mode 100644 index 0000000000..8e14716d05 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-env.xml @@ -0,0 +1,123 @@ + + + + dolphin.database.type + mysql + Dolphin Scheduler DataBase Type Which Is Select + Dolphin Database Type + + value-list + + + mysql + + + + postgresql + + + + 1 + + + + + + dolphin.database.host + + Dolphin Database Host + + + + + dolphin.database.port 
+ + Dolphin Database Port + + + + + dolphin.database.username + + Dolphin Database Username + + + + + dolphin.database.password + + Dolphin Database Password + PASSWORD + + password + + + + + + dolphin.user + + Which user to install and admin dolphin scheduler + Deploy User + + + + dolphin.group + + Which user to install and admin dolphin scheduler + Deploy Group + + + + + dolphinscheduler-env-content + Dolphinscheduler Env template + This is the jinja template for dolphinscheduler.env.sh file + # +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +export HADOOP_HOME=/opt/soft/hadoop +export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop +export SPARK_HOME1=/opt/soft/spark1 +export SPARK_HOME2=/opt/soft/spark2 +export PYTHON_HOME=/opt/soft/python +export JAVA_HOME=/opt/soft/java +export HIVE_HOME=/opt/soft/hive +export FLINK_HOME=/opt/soft/flink + + content + false + false + + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-master.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-master.xml new file mode 100644 index 0000000000..c8eec047fc --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-master.xml @@ -0,0 +1,88 @@ + + + + master.exec.threads + 100 + + int + + master execute thread num + + + + master.exec.task.num + 20 + + int + + master execute task number in parallel + + + + master.heartbeat.interval + 10 + + int + + master heartbeat interval + + + + master.task.commit.retryTimes + 5 + + int + + master commit task retry times + + + + master.task.commit.interval + 1000 + + int + + master commit task interval + + + + master.max.cpuload.avg + 100 + + int + + only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 + + + + master.reserved.memory + 0.3 + only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G. 
+ + + + + master.listen.port + 5678 + + int + + master listen port + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-quartz.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-quartz.xml new file mode 100644 index 0000000000..7a0c68b051 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-quartz.xml @@ -0,0 +1,126 @@ + + + + org.quartz.scheduler.instanceName + DolphinScheduler + + + + + org.quartz.scheduler.instanceId + AUTO + + + + org.quartz.scheduler.makeSchedulerThreadDaemon + true + + boolean + + + + + org.quartz.jobStore.useProperties + false + + boolean + + + + + org.quartz.threadPool.class + org.quartz.simpl.SimpleThreadPool + + + + org.quartz.threadPool.makeThreadsDaemons + true + + boolean + + + + + org.quartz.threadPool.threadCount + 25 + + int + + + + + org.quartz.threadPool.threadPriority + 5 + + int + + + + + org.quartz.jobStore.class + org.quartz.impl.jdbcjobstore.JobStoreTX + + + + org.quartz.jobStore.tablePrefix + QRTZ_ + + + + org.quartz.jobStore.isClustered + true + + boolean + + + + + org.quartz.jobStore.misfireThreshold + 60000 + + int + + + + + org.quartz.jobStore.clusterCheckinInterval + 5000 + + int + + + + + org.quartz.jobStore.acquireTriggersWithinLock + true + + boolean + + + + + org.quartz.jobStore.dataSource + myDs + + + + org.quartz.dataSource.myDs.connectionProvider.class + org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml new file mode 100644 index 0000000000..97beade1bc --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-worker.xml @@ -0,0 +1,76 @@ + + + + worker.exec.threads + 100 + + int + + worker execute thread num + + + + worker.heartbeat.interval + 10 + + int + + 
worker heartbeat interval + + + + worker.fetch.task.num + 3 + + int + + submit the number of tasks at a time + + + + worker.max.cpuload.avg + 100 + + int + + only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 + + + + worker.reserved.memory + 0.3 + only larger than reserved memory, worker server can work. default value : physical memory * 1/10, unit is G. + + + + + worker.listen.port + 1234 + + int + + worker listen port + + + + worker.group + default + default worker group + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml new file mode 100644 index 0000000000..5882162254 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/configuration/dolphin-zookeeper.xml @@ -0,0 +1,84 @@ + + + + dolphinscheduler.queue.impl + zookeeper + + Task queue implementation, default "zookeeper" + + + + + zookeeper.dolphinscheduler.root + /dolphinscheduler + + dolphinscheduler root directory + + + + + zookeeper.session.timeout + 300 + + int + + + + + + + zookeeper.connection.timeout + 300 + + int + + + + + + + zookeeper.retry.base.sleep + 100 + + int + + + + + + + zookeeper.retry.max.sleep + 30000 + + int + + + + + + + zookeeper.retry.maxtime + 5 + + int + + + + + + \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml b/ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml new file mode 100644 index 0000000000..a559085f03 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml @@ -0,0 +1,137 @@ + + + + 2.0 + + + DOLPHIN + Dolphin Scheduler + 分布式易扩展的可视化DAG工作流任务调度系统 + 1.3.0 + + + DOLPHIN_MASTER + DS Master + MASTER + 1+ + + + PYTHON + 600 + + + + + DOLPHIN_LOGGER + DS Logger + SLAVE + 1+ + + + PYTHON + 600 + + + + + DOLPHIN_WORKER + DS Worker + SLAVE + 1+ + + + DOLPHIN/DOLPHIN_LOGGER + host + + true + + + + + + 
PYTHON + 600 + + + + + DOLPHIN_ALERT + DS Alert + SLAVE + 1 + + + PYTHON + 600 + + + + + DOLPHIN_API + DS_Api + SLAVE + 1 + + + PYTHON + 600 + + + + + + ZOOKEEPER + + + + + any + + + apache-dolphinscheduler-incubating-1.3.0* + + + + + + + dolphin-alert + dolphin-app-api + dolphin-app-dao + dolphin-common + dolphin-env + dolphin-quartz + + + + + theme.json + true + + + + quicklinks + + + quicklinks.json + true + + + + + diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py new file mode 100644 index 0000000000..87cc7b453b --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py @@ -0,0 +1,124 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +import socket +import urllib2 +import os +import logging +import ambari_simplejson as json +from resource_management.libraries.script.script import Script +import sys +reload(sys) +sys.setdefaultencoding('utf-8') + +logger = logging.getLogger('ambari_alerts') + +config = Script.get_config() + + +def get_tokens(): + """ + Returns a tuple of tokens in the format {{site/property}} that will be used + to build the dictionary passed into execute + + :rtype tuple + """ + +def get_info(url, connection_timeout): + response = None + + try: + response = urllib2.urlopen(url, timeout=connection_timeout) + json_data = response.read() + return json_data + finally: + if response is not None: + try: + response.close() + except: + pass + + +def execute(configurations={}, parameters={}, host_name=None): + """ + Returns a tuple containing the result code and a pre-formatted result label + + Keyword arguments: + configurations : a mapping of configuration key to value + parameters : a mapping of script parameter key to value + host_name : the name of this host where the alert is running + + :type configurations dict + :type parameters dict + :type host_name str + """ + + alert_name = parameters['alertName'] + + dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler" + + pid = "0" + + + from resource_management.core import sudo + + is_running = True + pid_file_path = "" + if alert_name == 'DOLPHIN_MASTER': + pid_file_path = dolphin_pidfile_dir + "/master-server.pid" + elif alert_name == 'DOLPHIN_WORKER': + pid_file_path = dolphin_pidfile_dir + "/worker-server.pid" + elif alert_name == 'DOLPHIN_ALERT': + pid_file_path = dolphin_pidfile_dir + "/alert-server.pid" + elif alert_name == 'DOLPHIN_LOGGER': + pid_file_path = dolphin_pidfile_dir + "/logger-server.pid" + elif alert_name == 'DOLPHIN_API': + pid_file_path = dolphin_pidfile_dir + "/api-server.pid" + + if not pid_file_path or not os.path.isfile(pid_file_path): + is_running = False + + try: + pid = 
int(sudo.read_file(pid_file_path)) + except: + is_running = False + + try: + # Kill will not actually kill the process + # From the doc: + # If sig is 0, then no signal is sent, but error checking is still + # performed; this can be used to check for the existence of a + # process ID or process group ID. + sudo.kill(pid, 0) + except OSError: + is_running = False + + if host_name is None: + host_name = socket.getfqdn() + + if not is_running: + result_code = "CRITICAL" + else: + result_code = "OK" + + label = "The comment {0} of DOLPHIN_SCHEDULER on {1} is {2}".format(alert_name, host_name, result_code) + + return ((result_code, [label])) + +if __name__ == "__main__": + pass diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py new file mode 100644 index 0000000000..62255a3432 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_alert_service.py @@ -0,0 +1,61 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import time +from resource_management import * + +from dolphin_env import dolphin_env + + +class DolphinAlertService(Script): + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True) + + def configure(self, env): + import params + params.pika_slave = True + env.set_params(params) + + dolphin_env() + + def start(self, env): + import params + env.set_params(params) + self.configure(env) + no_op_test = format("ls {dolphin_pidfile_dir}/alert-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/alert-server.pid` | grep `cat {dolphin_pidfile_dir}/alert-server.pid` >/dev/null 2>&1") + + start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start alert-server") + Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test) + + def stop(self, env): + import params + env.set_params(params) + stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop alert-server") + Execute(stop_cmd, user=params.dolphin_user) + time.sleep(5) + + def status(self, env): + import status_params + env.set_params(status_params) + check_process_status(status_params.dolphin_run_dir + "alert-server.pid") + + +if __name__ == "__main__": + DolphinAlertService().execute() diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py new file mode 100644 index 0000000000..bdc18fb602 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_api_service.py @@ -0,0 +1,70 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. 
The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import time +from resource_management import * + +from dolphin_env import dolphin_env + + +class DolphinApiService(Script): + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True) + + def configure(self, env): + import params + params.pika_slave = True + env.set_params(params) + + dolphin_env() + + def start(self, env): + import params + env.set_params(params) + self.configure(env) + + #init + init_cmd=format("sh " + params.dolphin_home + "/script/create-dolphinscheduler.sh") + Execute(init_cmd, user=params.dolphin_user) + + #upgrade + upgrade_cmd=format("sh " + params.dolphin_home + "/script/upgrade-dolphinscheduler.sh") + Execute(upgrade_cmd, user=params.dolphin_user) + + no_op_test = format("ls {dolphin_pidfile_dir}/api-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/api-server.pid` | grep `cat {dolphin_pidfile_dir}/api-server.pid` >/dev/null 2>&1") + + start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start api-server") + Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test) + + def stop(self, env): + import params + env.set_params(params) + stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop api-server") + Execute(stop_cmd, user=params.dolphin_user) + time.sleep(5) + + def status(self, env): + 
import status_params + env.set_params(status_params) + check_process_status(status_params.dolphin_run_dir + "api-server.pid") + + +if __name__ == "__main__": + DolphinApiService().execute() diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_env.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_env.py new file mode 100644 index 0000000000..1661d76c75 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_env.py @@ -0,0 +1,123 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +from resource_management import * + + +def dolphin_env(): + import params + + Directory(params.dolphin_pidfile_dir, + mode=0777, + owner=params.dolphin_user, + group=params.dolphin_group, + create_parents=True + ) + Directory(params.dolphin_log_dir, + mode=0777, + owner=params.dolphin_user, + group=params.dolphin_group, + create_parents=True + ) + Directory(params.dolphin_conf_dir, + mode=0777, + owner=params.dolphin_user, + group=params.dolphin_group, + create_parents=True + ) + + Directory(params.dolphin_common_map['data.basedir.path'], + mode=0777, + owner=params.dolphin_user, + group=params.dolphin_group, + create_parents=True + ) + + + File(format(params.dolphin_env_path), + mode=0777, + content=InlineTemplate(params.dolphin_env_content), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + + File(format(params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh"), + mode=0755, + content=Template("dolphin-daemon.sh.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + File(format(params.dolphin_conf_dir + "/master.properties"), + mode=0755, + content=Template("master.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + File(format(params.dolphin_conf_dir + "/worker.properties"), + mode=0755, + content=Template("worker.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + + File(format(params.dolphin_conf_dir + "/alert.properties"), + mode=0755, + content=Template("alert.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + File(format(params.dolphin_conf_dir + "/datasource.properties"), + mode=0755, + content=Template("datasource.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + File(format(params.dolphin_conf_dir + "/application-api.properties"), + mode=0755, + content=Template("application-api.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + 
File(format(params.dolphin_conf_dir + "/common.properties"), + mode=0755, + content=Template("common.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + File(format(params.dolphin_conf_dir + "/quartz.properties"), + mode=0755, + content=Template("quartz.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) + + File(format(params.dolphin_conf_dir + "/zookeeper.properties"), + mode=0755, + content=Template("zookeeper.properties.j2"), + owner=params.dolphin_user, + group=params.dolphin_group + ) diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py new file mode 100644 index 0000000000..f1c19bd66f --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_logger_service.py @@ -0,0 +1,61 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import time +from resource_management import * + +from dolphin_env import dolphin_env + + +class DolphinLoggerService(Script): + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True) + + def configure(self, env): + import params + params.pika_slave = True + env.set_params(params) + + dolphin_env() + + def start(self, env): + import params + env.set_params(params) + self.configure(env) + no_op_test = format("ls {dolphin_pidfile_dir}/logger-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/logger-server.pid` | grep `cat {dolphin_pidfile_dir}/logger-server.pid` >/dev/null 2>&1") + + start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start logger-server") + Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test) + + def stop(self, env): + import params + env.set_params(params) + stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop logger-server") + Execute(stop_cmd, user=params.dolphin_user) + time.sleep(5) + + def status(self, env): + import status_params + env.set_params(status_params) + check_process_status(status_params.dolphin_run_dir + "logger-server.pid") + + +if __name__ == "__main__": + DolphinLoggerService().execute() diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py new file mode 100644 index 0000000000..6ee7ecfcf3 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_master_service.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. 
The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import time +from resource_management import * + +from dolphin_env import dolphin_env + + +class DolphinMasterService(Script): + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True) + + def configure(self, env): + import params + params.pika_slave = True + env.set_params(params) + + dolphin_env() + + def start(self, env): + import params + env.set_params(params) + self.configure(env) + no_op_test = format("ls {dolphin_pidfile_dir}/master-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/master-server.pid` | grep `cat {dolphin_pidfile_dir}/master-server.pid` >/dev/null 2>&1") + start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start master-server") + Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test) + + def stop(self, env): + import params + env.set_params(params) + stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop master-server") + Execute(stop_cmd, user=params.dolphin_user) + time.sleep(5) + + def status(self, env): + import status_params + env.set_params(status_params) + check_process_status(status_params.dolphin_run_dir + "master-server.pid") + + +if __name__ == "__main__": + DolphinMasterService().execute() diff --git 
a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py new file mode 100644 index 0000000000..2d145ee730 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/dolphin_worker_service.py @@ -0,0 +1,60 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import time +from resource_management import * + +from dolphin_env import dolphin_env + + +class DolphinWorkerService(Script): + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True) + + def configure(self, env): + import params + params.pika_slave = True + env.set_params(params) + + dolphin_env() + + def start(self, env): + import params + env.set_params(params) + self.configure(env) + no_op_test = format("ls {dolphin_pidfile_dir}/worker-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/worker-server.pid` | grep `cat {dolphin_pidfile_dir}/worker-server.pid` >/dev/null 2>&1") + start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start worker-server") + Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test) + + def stop(self, env): + import params + env.set_params(params) + stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop worker-server") + Execute(stop_cmd, user=params.dolphin_user) + time.sleep(5) + + def status(self, env): + import status_params + env.set_params(status_params) + check_process_status(status_params.dolphin_run_dir + "worker-server.pid") + + +if __name__ == "__main__": + DolphinWorkerService().execute() diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py new file mode 100644 index 0000000000..b09b2589f4 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/params.py @@ -0,0 +1,154 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. 
The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + + +import sys +from resource_management import * +from resource_management.core.logger import Logger +from resource_management.libraries.functions import default + +Logger.initialize_logger() +reload(sys) +sys.setdefaultencoding('utf-8') + +# server configurations +config = Script.get_config() + +# conf_dir = "/etc/" +dolphin_home = "/opt/soft/dolphinscheduler" +dolphin_conf_dir = dolphin_home + "/conf" +dolphin_log_dir = dolphin_home + "/logs" +dolphin_bin_dir = dolphin_home + "/bin" +dolphin_lib_jars = dolphin_home + "/lib/*" +dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler" + +rmHosts = default("/clusterHostInfo/rm_host", []) + +# dolphin-env +dolphin_env_map = {} +dolphin_env_map.update(config['configurations']['dolphin-env']) + +# which user to install and admin dolphin scheduler +dolphin_user = dolphin_env_map['dolphin.user'] +dolphin_group = dolphin_env_map['dolphin.group'] + +# .dolphinscheduler_env.sh +dolphin_env_path = dolphin_conf_dir + '/env/dolphinscheduler_env.sh' +dolphin_env_content = dolphin_env_map['dolphinscheduler-env-content'] + +# database config +dolphin_database_config = {} +dolphin_database_config['dolphin_database_type'] = dolphin_env_map['dolphin.database.type'] +dolphin_database_config['dolphin_database_username'] = dolphin_env_map['dolphin.database.username'] +dolphin_database_config['dolphin_database_password'] = dolphin_env_map['dolphin.database.password'] +if 'mysql' == 
dolphin_database_config['dolphin_database_type']: +    dolphin_database_config['dolphin_database_driver'] = 'com.mysql.jdbc.Driver' +    dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.StdJDBCDelegate' +    dolphin_database_config['dolphin_database_url'] = 'jdbc:mysql://' + dolphin_env_map['dolphin.database.host'] \ +                                                     + ':' + dolphin_env_map['dolphin.database.port'] \ +                                                     + '/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8' +else: +    dolphin_database_config['dolphin_database_driver'] = 'org.postgresql.Driver' +    dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.PostgreSQLDelegate' +    dolphin_database_config['dolphin_database_url'] = 'jdbc:postgresql://' + dolphin_env_map['dolphin.database.host'] \ +                                                          + ':' + dolphin_env_map['dolphin.database.port'] \ +                                                          + '/dolphinscheduler' + + + + + +# application-alert.properties +dolphin_alert_map = {} +wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token' +wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret' +wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}' +wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}' + +dolphin_alert_config_map = config['configurations']['dolphin-alert'] + +if dolphin_alert_config_map['enterprise.wechat.enable']: +    dolphin_alert_map['enterprise.wechat.push.url'] = wechat_push_url +    dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url +    dolphin_alert_map['enterprise.wechat.team.send.msg'] = wechat_team_send_msg +    dolphin_alert_map['enterprise.wechat.user.send.msg'] = wechat_user_send_msg + +dolphin_alert_map.update(dolphin_alert_config_map) + + + +# application-api.properties +dolphin_app_api_map = {}
+dolphin_app_api_map.update(config['configurations']['dolphin-application-api']) + + +# common.properties +dolphin_common_map = {} + +if 'yarn-site' in config['configurations'] and \ + 'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']: + yarn_resourcemanager_webapp_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'] + yarn_application_status_address = 'http://' + yarn_resourcemanager_webapp_address + '/ws/v1/cluster/apps/%s' + dolphin_common_map['yarn.application.status.address'] = yarn_application_status_address + +rmHosts = default("/clusterHostInfo/rm_host", []) +if len(rmHosts) > 1: + dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ','.join(rmHosts) +else: + dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = '' + +dolphin_common_map_tmp = config['configurations']['dolphin-common'] +data_basedir_path = dolphin_common_map_tmp['data.basedir.path'] +process_exec_basepath = data_basedir_path + '/exec' +data_download_basedir_path = data_basedir_path + '/download' +dolphin_common_map['process.exec.basepath'] = process_exec_basepath +dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path +dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path +dolphin_common_map.update(config['configurations']['dolphin-common']) + +# datasource.properties +dolphin_datasource_map = {} +dolphin_datasource_map['spring.datasource.type'] = 'com.alibaba.druid.pool.DruidDataSource' +dolphin_datasource_map['spring.datasource.driver-class-name'] = dolphin_database_config['dolphin_database_driver'] +dolphin_datasource_map['spring.datasource.url'] = dolphin_database_config['dolphin_database_url'] +dolphin_datasource_map['spring.datasource.username'] = dolphin_database_config['dolphin_database_username'] +dolphin_datasource_map['spring.datasource.password'] = dolphin_database_config['dolphin_database_password'] +dolphin_datasource_map.update(config['configurations']['dolphin-datasource']) 
+ +# master.properties +dolphin_master_map = config['configurations']['dolphin-master'] + +# quartz.properties +dolphin_quartz_map = {} +dolphin_quartz_map['org.quartz.jobStore.driverDelegateClass'] = dolphin_database_config['driverDelegateClass'] +dolphin_quartz_map.update(config['configurations']['dolphin-quartz']) + +# worker.properties +dolphin_worker_map = config['configurations']['dolphin-worker'] + +# zookeeper.properties +dolphin_zookeeper_map={} +zookeeperHosts = default("/clusterHostInfo/zookeeper_hosts", []) +if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg']: + clientPort = config['configurations']['zoo.cfg']['clientPort'] + zookeeperPort = ":" + clientPort + "," + dolphin_zookeeper_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort +dolphin_zookeeper_map.update(config['configurations']['dolphin-zookeeper']) + + + diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/service_check.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/service_check.py new file mode 100644 index 0000000000..0e12f69932 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/service_check.py @@ -0,0 +1,31 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. + +""" + +from resource_management import * +from resource_management.libraries.functions import get_unique_id_and_date + +class ServiceCheck(Script): + def service_check(self, env): + import params + #env.set_params(params) + + # Execute(format("which pika_server")) + +if __name__ == "__main__": + ServiceCheck().execute() diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/status_params.py b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/status_params.py new file mode 100644 index 0000000000..24b2c8b1bc --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/scripts/status_params.py @@ -0,0 +1,23 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" + +from resource_management import * + +config = Script.get_config() + +dolphin_run_dir = "/opt/soft/run/dolphinscheduler/" diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/alert.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/alert.properties.j2 new file mode 100644 index 0000000000..73840b8c18 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/alert.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +{% for key, value in dolphin_alert_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/application-api.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/application-api.properties.j2 new file mode 100644 index 0000000000..70118003b9 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/application-api.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +{% for key, value in dolphin_app_api_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/common.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/common.properties.j2 new file mode 100644 index 0000000000..2220c4effa --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/common.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +{% for key, value in dolphin_common_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/datasource.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/datasource.properties.j2 new file mode 100644 index 0000000000..40aed83543 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/datasource.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +{% for key, value in dolphin_datasource_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/dolphin-daemon.sh.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/dolphin-daemon.sh.j2 new file mode 100644 index 0000000000..0802b74750 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/dolphin-daemon.sh.j2 @@ -0,0 +1,116 @@ +#!/bin/sh +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +usage="Usage: dolphinscheduler-daemon.sh (start|stop) " + +# if no args specified, show usage +if [ $# -le 1 ]; then + echo $usage + exit 1 +fi + +startStop=$1 +shift +command=$1 +shift + +echo "Begin $startStop $command......" + +BIN_DIR=`dirname $0` +BIN_DIR=`cd "$BIN_DIR"; pwd` +DOLPHINSCHEDULER_HOME=$BIN_DIR/.. + +export HOSTNAME=`hostname` + +DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}} + +DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70" +STOP_TIMEOUT=5 + +log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out +pid={{dolphin_pidfile_dir}}/$command.pid + +cd $DOLPHINSCHEDULER_HOME + +if [ "$command" = "api-server" ]; then + LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-api.xml -Dspring.profiles.active=api" + CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer +elif [ "$command" = "master-server" ]; then + LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-master.xml -Ddruid.mysql.usePingMethod=false" + CLASS=org.apache.dolphinscheduler.server.master.MasterServer +elif [ "$command" = "worker-server" ]; then + LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-worker.xml -Ddruid.mysql.usePingMethod=false" + 
CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer +elif [ "$command" = "alert-server" ]; then + LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-alert.xml" + CLASS=org.apache.dolphinscheduler.alert.AlertServer +elif [ "$command" = "logger-server" ]; then + CLASS=org.apache.dolphinscheduler.server.log.LoggerServer +else + echo "Error: No command named \`$command' was found." + exit 1 +fi + +case $startStop in + (start) + + if [ -f $pid ]; then + if kill -0 `cat $pid` > /dev/null 2>&1; then + echo $command running as process `cat $pid`. Stop it first. + exit 1 + fi + fi + + echo starting $command, logging to $log + + exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath {{dolphin_conf_dir}}:{{dolphin_lib_jars}} $CLASS" + + echo "nohup java $exec_command > $log 2>&1 < /dev/null &" + nohup java $exec_command > $log 2>&1 < /dev/null & + echo $! > $pid + ;; + + (stop) + + if [ -f $pid ]; then + TARGET_PID=`cat $pid` + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo stopping $command + kill $TARGET_PID + sleep $STOP_TIMEOUT + if kill -0 $TARGET_PID > /dev/null 2>&1; then + echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9" + kill -9 $TARGET_PID + fi + else + echo no $command to stop + fi + rm -f $pid + else + echo no $command to stop + fi + ;; + + (*) + echo $usage + exit 1 + ;; + +esac + +echo "End $startStop $command." \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/master.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/master.properties.j2 new file mode 100644 index 0000000000..d9b85e14cf --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/master.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +{% for key, value in dolphin_master_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/quartz.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/quartz.properties.j2 new file mode 100644 index 0000000000..e027a263b5 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/quartz.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +{% for key, value in dolphin_quartz_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/worker.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/worker.properties.j2 new file mode 100644 index 0000000000..a008b74084 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/worker.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +{% for key, value in dolphin_worker_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/zookeeper.properties.j2 b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/zookeeper.properties.j2 new file mode 100644 index 0000000000..9eb14eaef3 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/package/templates/zookeeper.properties.j2 @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +{% for key, value in dolphin_zookeeper_map.iteritems() -%} + {{key}}={{value}} +{% endfor %} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/quicklinks/quicklinks.json b/ambari_plugin/common-services/DOLPHIN/1.3.0/quicklinks/quicklinks.json new file mode 100755 index 0000000000..8753004fef --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/quicklinks/quicklinks.json @@ -0,0 +1,26 @@ +{ + "name": "default", + "description": "default quick links configuration", + "configuration": { + "protocol": + { + "type":"http" + }, + + "links": [ + { + "name": "dolphin-application-ui", + "label": "DolphinApplication UI", + "requires_user_name": "false", + "component_name": "DOLPHIN_API", + "url": "%@://%@:%@/dolphinscheduler/ui/view/login/index.html", + "port":{ + "http_property": "server.port", + "http_default_port": "12345", + "regex": "^(\\d+)$", + "site": "dolphin-application-api" + } + } + ] + } +} \ No newline at end of file diff --git a/ambari_plugin/common-services/DOLPHIN/1.3.0/themes/theme.json b/ambari_plugin/common-services/DOLPHIN/1.3.0/themes/theme.json new file mode 100644 index 0000000000..953e2323f8 --- /dev/null +++ b/ambari_plugin/common-services/DOLPHIN/1.3.0/themes/theme.json @@ -0,0 +1,661 @@ +{ + "name": "default", + "description": "Default theme for Dolphin Scheduler service", + "configuration": { + "layouts": [ + { + "name": 
"default", + "tabs": [ + { + "name": "settings", + "display-name": "Settings", + "layout": { + "tab-rows": "3", + "tab-columns": "3", + "sections": [ + { + "name": "dolphin-env-config", + "display-name": "Dolphin Env Config", + "row-index": "0", + "column-index": "0", + "row-span": "1", + "column-span": "2", + "section-rows": "1", + "section-columns": "2", + "subsections": [ + { + "name": "env-row1-col1", + "display-name": "Deploy User Info", + "row-index": "0", + "column-index": "0", + "row-span": "1", + "column-span": "1" + }, + { + "name": "env-row1-col2", + "display-name": "System Env Optimization", + "row-index": "0", + "column-index": "1", + "row-span": "1", + "column-span": "1" + } + ] + }, + { + "name": "dolphin-database-config", + "display-name": "Database Config", + "row-index": "1", + "column-index": "0", + "row-span": "1", + "column-span": "2", + "section-rows": "1", + "section-columns": "3", + "subsections": [ + { + "name": "database-row1-col1", + "row-index": "0", + "column-index": "0", + "row-span": "1", + "column-span": "1" + }, + { + "name": "database-row1-col2", + "row-index": "0", + "column-index": "1", + "row-span": "1", + "column-span": "1" + }, + { + "name": "database-row1-col3", + "row-index": "0", + "column-index": "2", + "row-span": "1", + "column-span": "1" + } + ] + }, + { + "name": "dynamic-config", + "row-index": "2", + "column-index": "0", + "row-span": "1", + "column-span": "2", + "section-rows": "1", + "section-columns": "3", + "subsections": [ + { + "name": "dynamic-row1-col1", + "display-name": "Resource FS Config", + "row-index": "0", + "column-index": "0", + "row-span": "1", + "column-span": "1" + }, + { + "name": "dynamic-row1-col2", + "display-name": "Kerberos Info", + "row-index": "0", + "column-index": "1", + "row-span": "1", + "column-span": "1" + }, + { + "name": "dynamic-row1-col3", + "display-name": "Wechat Info", + "row-index": "0", + "column-index": "1", + "row-span": "1", + "column-span": "1" + } + ] + } + ] + } + } + 
] + } + ], + "placement": { + "configuration-layout": "default", + "configs": [ + { + "config": "dolphin-env/dolphin.database.type", + "subsection-name": "database-row1-col1" + }, + { + "config": "dolphin-env/dolphin.database.host", + "subsection-name": "database-row1-col2" + }, + { + "config": "dolphin-env/dolphin.database.port", + "subsection-name": "database-row1-col2" + }, + { + "config": "dolphin-env/dolphin.database.username", + "subsection-name": "database-row1-col3" + }, + { + "config": "dolphin-env/dolphin.database.password", + "subsection-name": "database-row1-col3" + }, + { + "config": "dolphin-env/dolphin.user", + "subsection-name": "env-row1-col1" + }, + { + "config": "dolphin-env/dolphin.group", + "subsection-name": "env-row1-col1" + }, + { + "config": "dolphin-env/dolphinscheduler-env-content", + "subsection-name": "env-row1-col2" + }, + { + "config": "dolphin-common/resource.storage.type", + "subsection-name": "dynamic-row1-col1" + }, + { + "config": "dolphin-common/resource.upload.path", + "subsection-name": "dynamic-row1-col1", + "depends-on": [ + { + "configs":[ + "dolphin-common/resource.storage.type" + ], + "if": "${dolphin-common/resource.storage.type} === HDFS || ${dolphin-common/resource.storage.type} === S3", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/hdfs.root.user", + "subsection-name": "dynamic-row1-col1", + "depends-on": [ + { + "configs":[ + "dolphin-common/resource.storage.type" + ], + "if": "${dolphin-common/resource.storage.type} === HDFS", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/data.store2hdfs.basepath", + "subsection-name": "dynamic-row1-col1", + "depends-on": [ + { + "configs":[ + "dolphin-common/resource.storage.type" + ], + "if": 
"${dolphin-common/resource.storage.type} === HDFS", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/fs.defaultFS", + "subsection-name": "dynamic-row1-col1", + "depends-on": [ + { + "configs":[ + "dolphin-common/resource.storage.type" + ], + "if": "${dolphin-common/resource.storage.type} === HDFS", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/fs.s3a.endpoint", + "subsection-name": "dynamic-row1-col1", + "depends-on": [ + { + "configs":[ + "dolphin-common/resource.storage.type" + ], + "if": "${dolphin-common/resource.storage.type} === S3", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/fs.s3a.access.key", + "subsection-name": "dynamic-row1-col1", + "depends-on": [ + { + "configs":[ + "dolphin-common/resource.storage.type" + ], + "if": "${dolphin-common/resource.storage.type} === S3", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/fs.s3a.secret.key", + "subsection-name": "dynamic-row1-col1", + "depends-on": [ + { + "configs":[ + "dolphin-common/resource.storage.type" + ], + "if": "${dolphin-common/resource.storage.type} === S3", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/hadoop.security.authentication.startup.state", + "subsection-name": "dynamic-row1-col2" + }, + { + "config": "dolphin-common/java.security.krb5.conf.path", + "subsection-name": "dynamic-row1-col2", + 
"depends-on": [ + { + "configs":[ + "dolphin-common/hadoop.security.authentication.startup.state" + ], + "if": "${dolphin-common/hadoop.security.authentication.startup.state}", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/login.user.keytab.username", + "subsection-name": "dynamic-row1-col2", + "depends-on": [ + { + "configs":[ + "dolphin-common/hadoop.security.authentication.startup.state" + ], + "if": "${dolphin-common/hadoop.security.authentication.startup.state}", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/login.user.keytab.path", + "subsection-name": "dynamic-row1-col2", + "depends-on": [ + { + "configs":[ + "dolphin-common/hadoop.security.authentication.startup.state" + ], + "if": "${dolphin-common/hadoop.security.authentication.startup.state}", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-common/kerberos.expire.time", + "subsection-name": "dynamic-row1-col2", + "depends-on": [ + { + "configs":[ + "dolphin-common/hadoop.security.authentication.startup.state" + ], + "if": "${dolphin-common/hadoop.security.authentication.startup.state}", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-alert/enterprise.wechat.enable", + "subsection-name": "dynamic-row1-col3" + }, + { + "config": "dolphin-alert/enterprise.wechat.corp.id", + "subsection-name": "dynamic-row1-col3", + "depends-on": [ + { + "configs":[ + "dolphin-alert/enterprise.wechat.enable" + ], + "if": "${dolphin-alert/enterprise.wechat.enable}", + "then": { + 
"property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-alert/enterprise.wechat.secret", + "subsection-name": "dynamic-row1-col3", + "depends-on": [ + { + "configs":[ + "dolphin-alert/enterprise.wechat.enable" + ], + "if": "${dolphin-alert/enterprise.wechat.enable}", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-alert/enterprise.wechat.agent.id", + "subsection-name": "dynamic-row1-col3", + "depends-on": [ + { + "configs":[ + "dolphin-alert/enterprise.wechat.enable" + ], + "if": "${dolphin-alert/enterprise.wechat.enable}", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + }, + { + "config": "dolphin-alert/enterprise.wechat.users", + "subsection-name": "dynamic-row1-col3", + "depends-on": [ + { + "configs":[ + "dolphin-alert/enterprise.wechat.enable" + ], + "if": "${dolphin-alert/enterprise.wechat.enable}", + "then": { + "property_value_attributes": { + "visible": true + } + }, + "else": { + "property_value_attributes": { + "visible": false + } + } + } + ] + } + ] + }, + "widgets": [ + { + "config": "dolphin-env/dolphin.database.type", + "widget": { + "type": "combo" + } + }, + { + "config": "dolphin-env/dolphin.database.host", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-env/dolphin.database.port", + "widget": { + "type": "text-field", + "units": [ + { + "unit-name": "int" + } + ] + } + }, + { + "config": "dolphin-env/dolphin.database.username", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-env/dolphin.database.password", + "widget": { + "type": "password" + } + }, + { + "config": "dolphin-env/dolphin.user", + "widget": { + "type": "text-field" + } + }, + { + "config": 
"dolphin-env/dolphin.group", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-env/dolphinscheduler-env-content", + "widget": { + "type": "text-area" + } + }, + { + "config": "dolphin-common/resource.storage.type", + "widget": { + "type": "combo" + } + }, + { + "config": "dolphin-common/resource.upload.path", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/hdfs.root.user", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/data.store2hdfs.basepath", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/fs.defaultFS", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/fs.s3a.endpoint", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/fs.s3a.access.key", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/fs.s3a.secret.key", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/hadoop.security.authentication.startup.state", + "widget": { + "type": "toggle" + } + }, + { + "config": "dolphin-common/java.security.krb5.conf.path", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/login.user.keytab.username", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/login.user.keytab.path", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-common/kerberos.expire.time", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-alert/enterprise.wechat.enable", + "widget": { + "type": "toggle" + } + }, + { + "config": "dolphin-alert/enterprise.wechat.corp.id", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-alert/enterprise.wechat.secret", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-alert/enterprise.wechat.agent.id", + "widget": { + "type": "text-field" + } + }, + { + "config": "dolphin-alert/enterprise.wechat.users", + "widget": { + "type": 
"text-field" + } + } + ] + } +} diff --git a/ambari_plugin/statcks/DOLPHIN/metainfo.xml b/ambari_plugin/statcks/DOLPHIN/metainfo.xml index 3af7f383eb..ea40cd304d 100755 --- a/ambari_plugin/statcks/DOLPHIN/metainfo.xml +++ b/ambari_plugin/statcks/DOLPHIN/metainfo.xml @@ -20,7 +20,7 @@ DOLPHIN - common-services/DOLPHIN/2.0.0 + common-services/DOLPHIN/1.3.0 \ No newline at end of file diff --git a/charts/dolphinscheduler/README.md b/charts/dolphinscheduler/README.md deleted file mode 100644 index 6f0317b9e2..0000000000 --- a/charts/dolphinscheduler/README.md +++ /dev/null @@ -1,226 +0,0 @@ -# Dolphin Scheduler - -[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. - -## Introduction -This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -- Kubernetes 1.10+ -- PV provisioner support in the underlying infrastructure - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```bash -$ git clone https://github.com/apache/incubator-dolphinscheduler.git -$ cd incubator-dolphinscheduler -$ helm install --name dolphinscheduler . -``` -These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `dolphinscheduler` deployment: - -```bash -$ helm delete --purge dolphinscheduler -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. 
- -## Configuration - -The following tables lists the configurable parameters of the Dolphins Scheduler chart and their default values. - -| Parameter | Description | Default | -| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- | -| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` | -| `image.registry` | Docker image registry for the Dolphins Scheduler | `docker.io` | -| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` | -| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` | -| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` | -| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` | -| | | | -| `postgresql.enabled` | If not exists external PostgreSQL, by default, the Dolphins Scheduler will use a internal PostgreSQL | `true` | -| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` | -| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` | -| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` | -| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` | -| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` | -| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `externalDatabase.host` | If exists external PostgreSQL, and set `postgresql.enable` value to false. 
Dolphins Scheduler's database host will use it. | `localhost` | -| `externalDatabase.port` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database port will use it. | `5432` | -| `externalDatabase.username` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database username will use it. | `root` | -| `externalDatabase.password` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database password will use it. | `root` | -| `externalDatabase.database` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database database will use it. | `dolphinscheduler` | -| | | | -| `zookeeper.enabled` | If not exists external Zookeeper, by default, the Dolphin Scheduler will use a internal Zookeeper | `true` | -| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` | -| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` | -| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` | -| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `externalZookeeper.taskQueue` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify task queue for `master` and `worker` | `zookeeper` | -| `externalZookeeper.zookeeperQuorum` | If exists external Zookeeper, and set `zookeeper.enable` value to false. 
Specify Zookeeper quorum | `127.0.0.1:2181` | -| | | | -| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` | -| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` | -| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `master.tolerations` | If specified, the pod's tolerations | `{}` | -| `master.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` | -| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` | -| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` | -| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` | -| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` | -| `master.configmap.MASTER_MAX_CPULOAD_AVG` | Only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 | `100` | -| `master.configmap.MASTER_RESERVED_MEMORY` | Only larger than reserved memory, master server can work. 
default value : physical memory * 1/10, unit is G | `0.1` | -| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `master.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `master.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `master.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `master.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` | -| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. 
If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` | -| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` | -| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `worker.tolerations` | If specified, the pod's tolerations | `{}` | -| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` | -| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` | -| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` | -| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | Only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 | `100` | -| `worker.configmap.WORKER_RESERVED_MEMORY` | Only larger than reserved memory, worker server can work. 
default value : physical memory * 1/10, unit is G | `0.1` | -| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and have read write permissions | `/tmp/dolphinscheduler` | -| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` | -| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `worker.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `worker.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `worker.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `worker.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` | -| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` | -| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` 
data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` | -| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` | -| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` | -| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` | -| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` | -| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `alert.tolerations` | If specified, the pod's tolerations | `{}` | -| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` | -| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST ` | `nil` | -| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` | -| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` | -| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` | -| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` | -| 
`alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` | -| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` | -| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` | -| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` | -| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` | -| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `alert.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `alert.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `alert.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `alert.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` | -| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| 
`alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` | -| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` | -| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` | -| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` | -| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `api.tolerations` | If specified, the pod's tolerations | `{}` | -| `api.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `api.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `api.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `api.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `api.readinessProbe.successThreshold` | Minimum consecutive failures for the 
probe | `1` | -| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` | -| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` | -| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` | -| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` | -| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` | -| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` | -| `frontend.tolerations` | If specified, the pod's tolerations | `{}` | -| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` | -| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` | -| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` | -| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| 
`frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` | -| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` | -| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` | -| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` | -| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` | -| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` | -| | | | -| `ingress.enabled` | Enable ingress | `false` | -| `ingress.host` | Ingress host | `dolphinscheduler.org` | -| `ingress.path` | Ingress path | `/` | -| `ingress.tls.enabled` | Enable ingress tls | `false` | -| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` | -| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` | - -For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation. diff --git a/docker/docker-swarm/docker-compose.yml b/docker/docker-swarm/docker-compose.yml new file mode 100644 index 0000000000..ffa91a0ba8 --- /dev/null +++ b/docker/docker-swarm/docker-compose.yml @@ -0,0 +1,233 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +version: "3.4" + +services: + + dolphinscheduler-postgresql: + image: bitnami/postgresql:latest + container_name: dolphinscheduler-postgresql + ports: + - 5432:5432 + environment: + TZ: Asia/Shanghai + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + volumes: + - dolphinscheduler-postgresql:/bitnami/postgresql + networks: + - dolphinscheduler + + dolphinscheduler-zookeeper: + image: bitnami/zookeeper:latest + container_name: dolphinscheduler-zookeeper + ports: + - 2181:2181 + environment: + TZ: Asia/Shanghai + ALLOW_ANONYMOUS_LOGIN: "yes" + volumes: + - dolphinscheduler-zookeeper:/bitnami/zookeeper + networks: + - dolphinscheduler + + dolphinscheduler-api: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + container_name: dolphinscheduler-api + command: ["api-server"] + ports: + - 12345:12345 + environment: + TZ: Asia/Shanghai + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + - dolphinscheduler-postgresql + - dolphinscheduler-zookeeper + volumes: + - dolphinscheduler-api:/opt/dolphinscheduler/logs + networks: + - dolphinscheduler + + dolphinscheduler-frontend: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + container_name: dolphinscheduler-frontend + 
command: ["frontend"] + ports: + - 8888:8888 + environment: + TZ: Asia/Shanghai + FRONTEND_API_SERVER_HOST: dolphinscheduler-api + FRONTEND_API_SERVER_PORT: 12345 + healthcheck: + test: ["CMD", "nc", "-z", "localhost", "8888"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + - dolphinscheduler-api + volumes: + - dolphinscheduler-frontend:/var/log/nginx + networks: + - dolphinscheduler + + dolphinscheduler-alert: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + container_name: dolphinscheduler-alert + command: ["alert-server"] + environment: + TZ: Asia/Shanghai + XLS_FILE_PATH: "/tmp/xls" + MAIL_SERVER_HOST: "" + MAIL_SERVER_PORT: "" + MAIL_SENDER: "" + MAIL_USER: "" + MAIL_PASSWD: "" + MAIL_SMTP_STARTTLS_ENABLE: "false" + MAIL_SMTP_SSL_ENABLE: "false" + MAIL_SMTP_SSL_TRUST: "" + ENTERPRISE_WECHAT_ENABLE: "false" + ENTERPRISE_WECHAT_CORP_ID: "" + ENTERPRISE_WECHAT_SECRET: "" + ENTERPRISE_WECHAT_AGENT_ID: "" + ENTERPRISE_WECHAT_USERS: "" + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "AlertServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + - dolphinscheduler-postgresql + volumes: + - dolphinscheduler-alert:/opt/dolphinscheduler/logs + networks: + - dolphinscheduler + + dolphinscheduler-master: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + container_name: dolphinscheduler-master + command: ["master-server"] + ports: + - 5678:5678 + environment: + TZ: Asia/Shanghai + MASTER_EXEC_THREADS: "100" + MASTER_EXEC_TASK_NUM: "20" + MASTER_HEARTBEAT_INTERVAL: "10" + MASTER_TASK_COMMIT_RETRYTIMES: "5" + MASTER_TASK_COMMIT_INTERVAL: "1000" + MASTER_MAX_CPULOAD_AVG: "100" + MASTER_RESERVED_MEMORY: "0.1" + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root 
+ POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "MasterServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + - dolphinscheduler-postgresql + - dolphinscheduler-zookeeper + volumes: + - dolphinscheduler-master:/opt/dolphinscheduler/logs + networks: + - dolphinscheduler + + dolphinscheduler-worker: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + container_name: dolphinscheduler-worker + command: ["worker-server"] + ports: + - 1234:1234 + - 50051:50051 + environment: + TZ: Asia/Shanghai + WORKER_EXEC_THREADS: "100" + WORKER_HEARTBEAT_INTERVAL: "10" + WORKER_FETCH_TASK_NUM: "3" + WORKER_MAX_CPULOAD_AVG: "100" + WORKER_RESERVED_MEMORY: "0.1" + WORKER_GROUP: "default" + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "WorkerServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + depends_on: + - dolphinscheduler-postgresql + - dolphinscheduler-zookeeper + volumes: + - type: bind + source: ./dolphinscheduler_env.sh + target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh + - type: volume + source: dolphinscheduler-worker-data + target: /tmp/dolphinscheduler + - type: volume + source: dolphinscheduler-worker-logs + target: /opt/dolphinscheduler/logs + networks: + - dolphinscheduler + +networks: + dolphinscheduler: + driver: bridge + +volumes: + dolphinscheduler-postgresql: + dolphinscheduler-zookeeper: + dolphinscheduler-api: + dolphinscheduler-frontend: + dolphinscheduler-alert: + dolphinscheduler-master: + dolphinscheduler-worker-data: + dolphinscheduler-worker-logs: + +configs: + 
dolphinscheduler-worker-task-env: + file: ./dolphinscheduler_env.sh \ No newline at end of file diff --git a/docker/docker-swarm/docker-stack.yml b/docker/docker-swarm/docker-stack.yml new file mode 100644 index 0000000000..e628205ade --- /dev/null +++ b/docker/docker-swarm/docker-stack.yml @@ -0,0 +1,230 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +version: "3.4" + +services: + + dolphinscheduler-postgresql: + image: bitnami/postgresql:latest + ports: + - 5432:5432 + environment: + TZ: Asia/Shanghai + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + volumes: + - dolphinscheduler-postgresql:/bitnami/postgresql + networks: + - dolphinscheduler + deploy: + mode: replicated + replicas: 1 + + dolphinscheduler-zookeeper: + image: bitnami/zookeeper:latest + ports: + - 2181:2181 + environment: + TZ: Asia/Shanghai + ALLOW_ANONYMOUS_LOGIN: "yes" + volumes: + - dolphinscheduler-zookeeper:/bitnami/zookeeper + networks: + - dolphinscheduler + deploy: + mode: replicated + replicas: 1 + + dolphinscheduler-api: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + command: ["api-server"] + ports: + - 12345:12345 + environment: + TZ: Asia/Shanghai + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + volumes: + - dolphinscheduler-api:/opt/dolphinscheduler/logs + networks: + - dolphinscheduler + deploy: + mode: replicated + replicas: 1 + + dolphinscheduler-frontend: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + command: ["frontend"] + ports: + - 8888:8888 + environment: + TZ: Asia/Shanghai + FRONTEND_API_SERVER_HOST: dolphinscheduler-api + FRONTEND_API_SERVER_PORT: 12345 + healthcheck: + test: ["CMD", "nc", "-z", "localhost", "8888"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + volumes: + - dolphinscheduler-frontend:/var/log/nginx + networks: + - dolphinscheduler + deploy: + mode: replicated + replicas: 1 + + dolphinscheduler-alert: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + command: 
["alert-server"] + environment: + TZ: Asia/Shanghai + XLS_FILE_PATH: "/tmp/xls" + MAIL_SERVER_HOST: "" + MAIL_SERVER_PORT: "" + MAIL_SENDER: "" + MAIL_USER: "" + MAIL_PASSWD: "" + MAIL_SMTP_STARTTLS_ENABLE: "false" + MAIL_SMTP_SSL_ENABLE: "false" + MAIL_SMTP_SSL_TRUST: "" + ENTERPRISE_WECHAT_ENABLE: "false" + ENTERPRISE_WECHAT_CORP_ID: "" + ENTERPRISE_WECHAT_SECRET: "" + ENTERPRISE_WECHAT_AGENT_ID: "" + ENTERPRISE_WECHAT_USERS: "" + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "AlertServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + volumes: + - dolphinscheduler-alert:/opt/dolphinscheduler/logs + networks: + - dolphinscheduler + deploy: + mode: replicated + replicas: 1 + + dolphinscheduler-master: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + command: ["master-server"] + ports: + - 5678:5678 + environment: + TZ: Asia/Shanghai + MASTER_EXEC_THREADS: "100" + MASTER_EXEC_TASK_NUM: "20" + MASTER_HEARTBEAT_INTERVAL: "10" + MASTER_TASK_COMMIT_RETRYTIMES: "5" + MASTER_TASK_COMMIT_INTERVAL: "1000" + MASTER_MAX_CPULOAD_AVG: "100" + MASTER_RESERVED_MEMORY: "0.1" + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "MasterServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + volumes: + - dolphinscheduler-master:/opt/dolphinscheduler/logs + networks: + - dolphinscheduler + deploy: + mode: replicated + replicas: 1 + + dolphinscheduler-worker: + image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev + command: ["worker-server"] + ports: + - 1234:1234 + - 50051:50051 + environment: + TZ: Asia/Shanghai + 
WORKER_EXEC_THREADS: "100" + WORKER_HEARTBEAT_INTERVAL: "10" + WORKER_FETCH_TASK_NUM: "3" + WORKER_MAX_CPULOAD_AVG: "100" + WORKER_RESERVED_MEMORY: "0.1" + WORKER_GROUP: "default" + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" + POSTGRESQL_HOST: dolphinscheduler-postgresql + POSTGRESQL_PORT: 5432 + POSTGRESQL_USERNAME: root + POSTGRESQL_PASSWORD: root + POSTGRESQL_DATABASE: dolphinscheduler + ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181 + healthcheck: + test: ["CMD", "/root/checkpoint.sh", "WorkerServer"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 30s + volumes: + - dolphinscheduler-worker-data:/tmp/dolphinscheduler + - dolphinscheduler-worker-logs:/opt/dolphinscheduler/logs + configs: + - source: dolphinscheduler-worker-task-env + target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh + networks: + - dolphinscheduler + deploy: + mode: replicated + replicas: 1 + +networks: + dolphinscheduler: + driver: overlay + +volumes: + dolphinscheduler-postgresql: + dolphinscheduler-zookeeper: + dolphinscheduler-api: + dolphinscheduler-frontend: + dolphinscheduler-alert: + dolphinscheduler-master: + dolphinscheduler-worker-data: + dolphinscheduler-worker-logs: + +configs: + dolphinscheduler-worker-task-env: + file: ./dolphinscheduler_env.sh \ No newline at end of file diff --git a/docker/docker-swarm/dolphinscheduler_env.sh b/docker/docker-swarm/dolphinscheduler_env.sh new file mode 100644 index 0000000000..654318cb41 --- /dev/null +++ b/docker/docker-swarm/dolphinscheduler_env.sh @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +export PYTHON_HOME=/usr/bin/python2 +export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk +export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH \ No newline at end of file diff --git a/charts/dolphinscheduler/Chart.yaml b/docker/kubernetes/dolphinscheduler/Chart.yaml similarity index 86% rename from charts/dolphinscheduler/Chart.yaml rename to docker/kubernetes/dolphinscheduler/Chart.yaml index 2c40f94d3c..ac989d571f 100644 --- a/charts/dolphinscheduler/Chart.yaml +++ b/docker/kubernetes/dolphinscheduler/Chart.yaml @@ -21,8 +21,8 @@ description: Dolphin Scheduler is a distributed and easy-to-expand visual DAG wo home: https://dolphinscheduler.apache.org icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg keywords: - - dolphinscheduler - - Scheduler +- dolphinscheduler +- Scheduler # A chart can be either an 'application' or a 'library' chart. # # Application charts are a collection of templates that can be packaged into versioned archives @@ -35,18 +35,18 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 0.1.0 +version: 1.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 1.2.1 +appVersion: 1.3.0 dependencies: - - name: postgresql - version: 8.x.x - repository: https://charts.bitnami.com/bitnami - condition: postgresql.enabled - - name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled +- name: postgresql + version: 8.x.x + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled +- name: zookeeper + version: 5.x.x + repository: https://charts.bitnami.com/bitnami + condition: zookeeper.enabled diff --git a/charts/README.md b/docker/kubernetes/dolphinscheduler/README.md similarity index 99% rename from charts/README.md rename to docker/kubernetes/dolphinscheduler/README.md index 6f0317b9e2..9e6d1c6448 100644 --- a/charts/README.md +++ b/docker/kubernetes/dolphinscheduler/README.md @@ -16,7 +16,9 @@ To install the chart with the release name `my-release`: ```bash $ git clone https://github.com/apache/incubator-dolphinscheduler.git -$ cd incubator-dolphinscheduler +$ cd incubator-dolphinscheduler/docker/kubernetes/dolphinscheduler +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm dependency update . $ helm install --name dolphinscheduler . ``` These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. diff --git a/docker/kubernetes/dolphinscheduler/requirements.yaml b/docker/kubernetes/dolphinscheduler/requirements.yaml new file mode 100644 index 0000000000..e219975995 --- /dev/null +++ b/docker/kubernetes/dolphinscheduler/requirements.yaml @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +dependencies: +- name: postgresql + version: 8.x.x + repository: https://charts.bitnami.com/bitnami + condition: postgresql.enabled +- name: zookeeper + version: 5.x.x + repository: https://charts.bitnami.com/bitnami + condition: zookeeper.enabled \ No newline at end of file diff --git a/charts/dolphinscheduler/templates/NOTES.txt b/docker/kubernetes/dolphinscheduler/templates/NOTES.txt similarity index 100% rename from charts/dolphinscheduler/templates/NOTES.txt rename to docker/kubernetes/dolphinscheduler/templates/NOTES.txt diff --git a/charts/dolphinscheduler/templates/_helpers.tpl b/docker/kubernetes/dolphinscheduler/templates/_helpers.tpl similarity index 88% rename from charts/dolphinscheduler/templates/_helpers.tpl rename to docker/kubernetes/dolphinscheduler/templates/_helpers.tpl index 37fb034128..9ba290b771 100644 --- a/charts/dolphinscheduler/templates/_helpers.tpl +++ b/docker/kubernetes/dolphinscheduler/templates/_helpers.tpl @@ -130,20 +130,4 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this {{- define "dolphinscheduler.worker.base.dir" -}} {{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}} {{- printf "%s" $name | trunc 63 | trimSuffix "/" -}} -{{- end -}} - -{{/* -Create a default dolphinscheduler worker data download dir. 
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "dolphinscheduler.worker.data.download.dir" -}} -{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}} -{{- end -}} - -{{/* -Create a default dolphinscheduler worker process exec dir. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "dolphinscheduler.worker.process.exec.dir" -}} -{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}} {{- end -}} \ No newline at end of file diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml similarity index 100% rename from charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml rename to docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml similarity index 90% rename from charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml rename to docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml index 8cce068276..da82d639cb 100644 --- a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml @@ -31,4 +31,6 @@ data: MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }} MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }} MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }} + MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }} + DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include 
"dolphinscheduler.worker.base.dir" . | quote }} {{- end }} \ No newline at end of file diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml similarity index 88% rename from charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml rename to docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml index be7391fb32..1e08b67b53 100644 --- a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml @@ -29,9 +29,9 @@ data: WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }} WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }} WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }} + WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }} + WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }} DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }} - DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }} - DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }} dolphinscheduler_env.sh: |- {{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }} {{ . 
}} diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml similarity index 95% rename from charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml rename to docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml index 26026f74b3..69662e95d9 100644 --- a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml @@ -166,19 +166,19 @@ spec: value: {{ template "dolphinscheduler.postgresql.fullname" . }} {{- else }} value: {{ .Values.externalDatabase.host | quote }} - {{- end }} + {{- end }} - name: POSTGRESQL_PORT {{- if .Values.postgresql.enabled }} value: "5432" {{- else }} value: {{ .Values.externalDatabase.port }} - {{- end }} + {{- end }} - name: POSTGRESQL_USERNAME {{- if .Values.postgresql.enabled }} value: {{ .Values.postgresql.postgresqlUsername }} {{- else }} value: {{ .Values.externalDatabase.username | quote }} - {{- end }} + {{- end }} - name: POSTGRESQL_PASSWORD valueFrom: secretKeyRef: @@ -188,14 +188,20 @@ spec: {{- else }} name: {{ printf "%s-%s" .Release.Name "externaldb" }} key: db-password - {{- end }} + {{- end }} + - name: POSTGRESQL_DATABASE + {{- if .Values.postgresql.enabled }} + value: {{ .Values.postgresql.postgresqlDatabase }} + {{- else }} + value: {{ .Values.externalDatabase.database | quote }} + {{- end }} {{- if .Values.alert.livenessProbe.enabled }} livenessProbe: exec: command: - sh - /root/checkpoint.sh - - worker-server + - AlertServer initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }} @@ -208,7 +214,7 @@ spec: command: - sh - /root/checkpoint.sh - - worker-server + - AlertServer initialDelaySeconds: {{ 
.Values.alert.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }} diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml similarity index 94% rename from charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml rename to docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml index 926ce3c062..487ede0b8f 100644 --- a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml @@ -99,19 +99,19 @@ spec: value: {{ template "dolphinscheduler.postgresql.fullname" . }} {{- else }} value: {{ .Values.externalDatabase.host | quote }} - {{- end }} + {{- end }} - name: POSTGRESQL_PORT {{- if .Values.postgresql.enabled }} value: "5432" {{- else }} value: {{ .Values.externalDatabase.port }} - {{- end }} + {{- end }} - name: POSTGRESQL_USERNAME {{- if .Values.postgresql.enabled }} value: {{ .Values.postgresql.postgresqlUsername }} {{- else }} value: {{ .Values.externalDatabase.username | quote }} - {{- end }} + {{- end }} - name: POSTGRESQL_PASSWORD valueFrom: secretKeyRef: @@ -122,6 +122,12 @@ spec: name: {{ printf "%s-%s" .Release.Name "externaldb" }} key: db-password {{- end }} + - name: POSTGRESQL_DATABASE + {{- if .Values.postgresql.enabled }} + value: {{ .Values.postgresql.postgresqlDatabase }} + {{- else }} + value: {{ .Values.externalDatabase.database | quote }} + {{- end }} - name: ZOOKEEPER_QUORUM {{- if .Values.zookeeper.enabled }} value: "{{ template "dolphinscheduler.zookeeper.quorum" . 
}}" diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml b/docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml similarity index 100% rename from charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml rename to docker/kubernetes/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml diff --git a/charts/dolphinscheduler/templates/ingress.yaml b/docker/kubernetes/dolphinscheduler/templates/ingress.yaml similarity index 100% rename from charts/dolphinscheduler/templates/ingress.yaml rename to docker/kubernetes/dolphinscheduler/templates/ingress.yaml diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml b/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml similarity index 100% rename from charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml rename to docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml b/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml similarity index 100% rename from charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml rename to docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml b/docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml similarity index 100% rename from charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml rename to docker/kubernetes/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml diff --git a/charts/dolphinscheduler/templates/secret-external-postgresql.yaml b/docker/kubernetes/dolphinscheduler/templates/secret-external-postgresql.yaml similarity index 100% rename from charts/dolphinscheduler/templates/secret-external-postgresql.yaml rename to 
docker/kubernetes/dolphinscheduler/templates/secret-external-postgresql.yaml diff --git a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml similarity index 92% rename from charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml rename to docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml index ac974128b7..b142fac4fe 100644 --- a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml @@ -109,8 +109,8 @@ spec: args: - "master-server" ports: - - containerPort: 8888 - name: unused-tcp-port + - containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }} + name: "master-port" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - name: TZ @@ -150,6 +150,16 @@ spec: configMapKeyRef: name: {{ include "dolphinscheduler.fullname" . }}-master key: MASTER_RESERVED_MEMORY + - name: MASTER_LISTEN_PORT + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-master + key: MASTER_LISTEN_PORT + - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-master + key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH - name: POSTGRESQL_HOST {{- if .Values.postgresql.enabled }} value: {{ template "dolphinscheduler.postgresql.fullname" . 
}} @@ -178,11 +188,11 @@ spec: name: {{ printf "%s-%s" .Release.Name "externaldb" }} key: db-password {{- end }} - - name: TASK_QUEUE - {{- if .Values.zookeeper.enabled }} - value: {{ .Values.zookeeper.taskQueue }} + - name: POSTGRESQL_DATABASE + {{- if .Values.postgresql.enabled }} + value: {{ .Values.postgresql.postgresqlDatabase }} {{- else }} - value: {{ .Values.externalZookeeper.taskQueue }} + value: {{ .Values.externalDatabase.database | quote }} {{- end }} - name: ZOOKEEPER_QUORUM {{- if .Values.zookeeper.enabled }} @@ -196,7 +206,7 @@ spec: command: - sh - /root/checkpoint.sh - - master-server + - MasterServer initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }} @@ -209,7 +219,7 @@ spec: command: - sh - /root/checkpoint.sh - - master-server + - MasterServer initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }} diff --git a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml similarity index 90% rename from charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml rename to docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml index a2407978b4..198cef43b6 100644 --- a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml @@ -109,6 +109,8 @@ spec: args: - "worker-server" ports: + - containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }} + name: "worker-port" - containerPort: 50051 name: "logs-port" imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -140,6 +142,21 @@ spec: 
configMapKeyRef: name: {{ include "dolphinscheduler.fullname" . }}-worker key: WORKER_RESERVED_MEMORY + - name: WORKER_LISTEN_PORT + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-worker + key: WORKER_LISTEN_PORT + - name: WORKER_GROUP + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-worker + key: WORKER_GROUP + - name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH + valueFrom: + configMapKeyRef: + name: {{ include "dolphinscheduler.fullname" . }}-master + key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH - name: POSTGRESQL_HOST {{- if .Values.postgresql.enabled }} value: {{ template "dolphinscheduler.postgresql.fullname" . }} @@ -167,12 +184,12 @@ spec: {{- else }} name: {{ printf "%s-%s" .Release.Name "externaldb" }} key: db-password - {{- end }} - - name: TASK_QUEUE - {{- if .Values.zookeeper.enabled }} - value: {{ .Values.zookeeper.taskQueue }} + {{- end }} + - name: POSTGRESQL_DATABASE + {{- if .Values.postgresql.enabled }} + value: {{ .Values.postgresql.postgresqlDatabase }} {{- else }} - value: {{ .Values.externalZookeeper.taskQueue }} + value: {{ .Values.externalDatabase.database | quote }} {{- end }} - name: ZOOKEEPER_QUORUM {{- if .Values.zookeeper.enabled }} @@ -186,7 +203,7 @@ spec: command: - sh - /root/checkpoint.sh - - worker-server + - WorkerServer initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }} timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }} @@ -199,7 +216,7 @@ spec: command: - sh - /root/checkpoint.sh - - worker-server + - WorkerServer initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }} periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }} timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }} @@ -247,7 +264,7 @@ spec: app.kubernetes.io/managed-by: {{ .Release.Service }} spec: accessModes: - {{- range 
.Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }} + {{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }} - {{ . | quote }} {{- end }} storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }} @@ -264,7 +281,7 @@ spec: app.kubernetes.io/managed-by: {{ .Release.Service }} spec: accessModes: - {{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }} + {{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }} - {{ . | quote }} {{- end }} storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }} diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml similarity index 100% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml rename to docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml similarity index 100% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml rename to docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml similarity index 89% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml rename to docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml index 7aaf0b4353..7f82cff31e 100644 --- a/charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml @@ -25,10 
+25,10 @@ metadata: spec: clusterIP: "None" ports: - - port: 8888 - targetPort: tcp-port + - port: {{ .Values.master.configmap.MASTER_LISTEN_PORT }} + targetPort: master-port protocol: TCP - name: unused-tcp-port + name: master-port selector: app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml similarity index 89% rename from charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml rename to docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml index 3e92a349d4..fb3b85b5c3 100644 --- a/charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml +++ b/docker/kubernetes/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml @@ -25,6 +25,10 @@ metadata: spec: clusterIP: "None" ports: + - port: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }} + targetPort: worker-port + protocol: TCP + name: worker-port - port: 50051 targetPort: logs-port protocol: TCP diff --git a/charts/dolphinscheduler/values.yaml b/docker/kubernetes/dolphinscheduler/values.yaml similarity index 98% rename from charts/dolphinscheduler/values.yaml rename to docker/kubernetes/dolphinscheduler/values.yaml index 962a031a0c..4f70afade5 100644 --- a/charts/dolphinscheduler/values.yaml +++ b/docker/kubernetes/dolphinscheduler/values.yaml @@ -27,7 +27,7 @@ timezone: "Asia/Shanghai" image: registry: "docker.io" repository: "dolphinscheduler" - tag: "1.2.1" + tag: "1.3.0" pullPolicy: "IfNotPresent" imagePullSecrets: [] @@ -56,6 +56,8 @@ externalDatabase: zookeeper: enabled: true taskQueue: "zookeeper" + service: + port: "2181" persistence: enabled: false size: "20Gi" @@ -91,6 +93,7 @@ master: MASTER_TASK_COMMIT_INTERVAL: "1000" MASTER_MAX_CPULOAD_AVG: "100" MASTER_RESERVED_MEMORY: 
"0.1" + MASTER_LISTEN_PORT: "5678" livenessProbe: enabled: true initialDelaySeconds: "30" @@ -156,6 +159,8 @@ worker: WORKER_FETCH_TASK_NUM: "3" WORKER_MAX_CPULOAD_AVG: "100" WORKER_RESERVED_MEMORY: "0.1" + WORKER_LISTEN_PORT: "1234" + WORKER_GROUP: "default" DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler" DOLPHINSCHEDULER_ENV: - "export HADOOP_HOME=/opt/soft/hadoop" diff --git a/dockerfile/Dockerfile b/dockerfile/Dockerfile index c48b51e377..bed8f6537c 100644 --- a/dockerfile/Dockerfile +++ b/dockerfile/Dockerfile @@ -27,7 +27,7 @@ ENV DEBIAN_FRONTEND noninteractive #If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example: #RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories RUN apk update && \ - apk add dos2unix shadow bash openrc python sudo vim wget iputils net-tools openssh-server py2-pip tini && \ + apk add dos2unix shadow bash openrc python python3 sudo vim wget iputils net-tools openssh-server py2-pip tini && \ apk add --update procps && \ openrc boot && \ pip install kazoo @@ -67,6 +67,7 @@ ADD ./checkpoint.sh /root/checkpoint.sh ADD ./startup-init-conf.sh /root/startup-init-conf.sh ADD ./startup.sh /root/startup.sh ADD ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/ +ADD ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/ ADD conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/ RUN chmod +x /root/checkpoint.sh && \ chmod +x /root/startup-init-conf.sh && \ diff --git a/dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh b/dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh index 070c438bb6..78c8f98bc1 100644 --- a/dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh +++ b/dockerfile/conf/dolphinscheduler/env/dolphinscheduler_env.sh @@ -15,12 +15,6 @@ # limitations under the License. 
# -export HADOOP_HOME=/opt/soft/hadoop -export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop -export SPARK_HOME1=/opt/soft/spark1 -export SPARK_HOME2=/opt/soft/spark2 -export PYTHON_HOME=/opt/soft/python -export JAVA_HOME=/opt/soft/java -export HIVE_HOME=/opt/soft/hive -export FLINK_HOME=/opt/soft/flink -export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH +export PYTHON_HOME=/usr/bin/python2 +export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk +export PATH=$PYTHON_HOME/bin:$JAVA_HOME/bin:$PATH diff --git a/dockerfile/conf/dolphinscheduler/logback/logback-alert.xml b/dockerfile/conf/dolphinscheduler/logback/logback-alert.xml new file mode 100644 index 0000000000..5d1c07858d --- /dev/null +++ b/dockerfile/conf/dolphinscheduler/logback/logback-alert.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + + UTF-8 + + + + + ${log.base}/dolphinscheduler-alert.log + + ${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log + 20 + 64MB + + + + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + + UTF-8 + + + + + + + + + \ No newline at end of file diff --git a/dockerfile/conf/dolphinscheduler/logback/logback-api.xml b/dockerfile/conf/dolphinscheduler/logback/logback-api.xml new file mode 100644 index 0000000000..2df90d8392 --- /dev/null +++ b/dockerfile/conf/dolphinscheduler/logback/logback-api.xml @@ -0,0 +1,62 @@ + + + + + + + + + + + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + + UTF-8 + + + + + + ${log.base}/dolphinscheduler-api-server.log + + INFO + + + ${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log + 168 + 64MB + + + + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + + UTF-8 + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/dockerfile/conf/dolphinscheduler/logback/logback-master.xml 
b/dockerfile/conf/dolphinscheduler/logback/logback-master.xml new file mode 100644 index 0000000000..7410c01f05 --- /dev/null +++ b/dockerfile/conf/dolphinscheduler/logback/logback-master.xml @@ -0,0 +1,82 @@ + + + + + + + + + + + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + + UTF-8 + + + + + + + INFO + + + + taskAppId + ${log.base} + + + + ${log.base}/${taskAppId}.log + + + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n + + UTF-8 + + true + + + + + + ${log.base}/dolphinscheduler-master.log + + + ${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log + 168 + 200MB + + + + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + + UTF-8 + + + + + + + + + + + \ No newline at end of file diff --git a/dockerfile/conf/dolphinscheduler/conf/worker_logback.xml b/dockerfile/conf/dolphinscheduler/logback/logback-worker.xml similarity index 84% rename from dockerfile/conf/dolphinscheduler/conf/worker_logback.xml rename to dockerfile/conf/dolphinscheduler/logback/logback-worker.xml index 1b09260334..be1d0acde5 100644 --- a/dockerfile/conf/dolphinscheduler/conf/worker_logback.xml +++ b/dockerfile/conf/dolphinscheduler/logback/logback-worker.xml @@ -1,4 +1,4 @@ - + - + + @@ -27,11 +28,15 @@ UTF-8 + + + INFO - + taskAppId ${log.base} @@ -41,7 +46,7 @@ ${log.base}/${taskAppId}.log - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n UTF-8 @@ -49,31 +54,30 @@ - ${log.base}/dolphinscheduler-worker.log - + INFO - + ${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log 168 200MB -       - [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n + [%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n UTF-8 -    - + + + \ No newline at end of file diff --git a/dockerfile/startup.sh b/dockerfile/startup.sh index 30439c2321..71ab506777 100644 --- a/dockerfile/startup.sh +++ 
b/dockerfile/startup.sh @@ -25,7 +25,9 @@ DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs # start postgresql initPostgreSQL() { echo "checking postgresql" - if [ -n "$(ifconfig | grep ${POSTGRESQL_HOST})" ]; then + if [[ "${POSTGRESQL_HOST}" = "127.0.0.1" || "${POSTGRESQL_HOST}" = "localhost" ]]; then + export PGPORT=${POSTGRESQL_PORT} + echo "start postgresql service" rc-service postgresql restart @@ -47,10 +49,21 @@ initPostgreSQL() { sudo -u postgres psql -tAc "grant all privileges on database dolphinscheduler to ${POSTGRESQL_USERNAME}" fi + echo "test postgresql service" + while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do + counter=$((counter+1)) + if [ $counter == 30 ]; then + echo "Error: Couldn't connect to postgresql." + exit 1 + fi + echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter." + sleep 5 + done + echo "connect postgresql service" - v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1") + v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -p ${POSTGRESQL_PORT} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1") if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then - echo "Can't connect to database...${v}" + echo "Error: Can't connect to database...${v}" exit 1 fi @@ -70,10 +83,10 @@ initZK() { while ! nc -z ${line%:*} ${line#*:}; do counter=$((counter+1)) if [ $counter == 30 ]; then - log "Error: Couldn't connect to zookeeper." + echo "Error: Couldn't connect to zookeeper." exit 1 fi - log "Trying to connect to zookeeper at ${line}. Attempt $counter." + echo "Trying to connect to zookeeper at ${line}. Attempt $counter." 
sleep 5 done done diff --git a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java index 852d245a2e..d0f3538c1b 100644 --- a/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java +++ b/dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/utils/EnterpriseWeChatUtilsTest.java @@ -21,6 +21,7 @@ import org.apache.dolphinscheduler.common.enums.AlertType; import org.apache.dolphinscheduler.common.enums.ShowType; import org.apache.dolphinscheduler.dao.entity.Alert; import org.junit.Assert; +import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; @@ -54,11 +55,19 @@ public class EnterpriseWeChatUtilsTest { private static final String enterpriseWechatUsers="LiGang,journey"; private static final String msg = "hello world"; + private static final String enterpriseWechatTeamSendMsg = "{\\\"toparty\\\":\\\"$toParty\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"text\\\",\\\"text\\\":{\\\"content\\\":\\\"$msg\\\"},\\\"safe\\\":\\\"0\\\"}"; + private static final String enterpriseWechatUserSendMsg = "{\\\"touser\\\":\\\"$toUser\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"markdown\\\",\\\"markdown\\\":{\\\"content\\\":\\\"$msg\\\"}}"; - @Test - public void testIsEnable(){ + @Before + public void init(){ PowerMockito.mockStatic(PropertyUtils.class); Mockito.when(PropertyUtils.getBoolean(Constants.ENTERPRISE_WECHAT_ENABLE)).thenReturn(true); + Mockito.when(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_USER_SEND_MSG)).thenReturn(enterpriseWechatUserSendMsg); + Mockito.when(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_TEAM_SEND_MSG)).thenReturn(enterpriseWechatTeamSendMsg); + } + + @Test + public void testIsEnable(){ Boolean weChartEnable = 
EnterpriseWeChatUtils.isEnable(); Assert.assertTrue(weChartEnable); } @@ -88,6 +97,7 @@ public class EnterpriseWeChatUtilsTest { @Test public void tesMakeUserSendMsg1(){ + String sendMsg = EnterpriseWeChatUtils.makeUserSendMsg(enterpriseWechatUsers, enterpriseWechatAgentId, msg); Assert.assertTrue(sendMsg.contains(enterpriseWechatUsers)); Assert.assertTrue(sendMsg.contains(enterpriseWechatAgentId)); diff --git a/dolphinscheduler-alert/src/test/resources/alert.properties b/dolphinscheduler-alert/src/test/resources/alert.properties deleted file mode 100644 index ce233cea37..0000000000 --- a/dolphinscheduler-alert/src/test/resources/alert.properties +++ /dev/null @@ -1,67 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -# For unit test - -#alert type is EMAIL/SMS -alert.type=EMAIL - -# mail server configuration -mail.protocol=SMTP -mail.server.host=xxx.xxx.test -mail.server.port=25 -mail.sender=xxx@xxx.com -mail.user=xxx@xxx.com -mail.passwd=111111 - -# Test double -test.server.factor=3.0 - - -# Test NumberFormat -test.server.testnumber=abc - -# Test array -test.server.list=xxx.xxx.test1,xxx.xxx.test2,xxx.xxx.test3 - -# Test enum -test.server.enum1=MASTER -test.server.enum2=DEAD_SERVER -test.server.enum3=abc - -# TLS -mail.smtp.starttls.enable=true -# SSL -mail.smtp.ssl.enable=false -mail.smtp.ssl.trust=xxx.xxx.com - -#xls file path,need create if not exist -xls.file.path=/tmp/xls - -# Enterprise WeChat configuration -enterprise.wechat.enable=false -enterprise.wechat.corp.id=xxxxxxx -enterprise.wechat.secret=xxxxxxx -enterprise.wechat.agent.id=xxxxxxx -enterprise.wechat.users=xxxxxxx -enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret -enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token -enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"} -enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}} - - - diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java index b3fe1a9eef..4f3dafdf27 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java @@ -94,6 +94,30 @@ public class ProcessDefinitionController extends BaseController { return 
returnDataList(result); } + /** + * copy process definition + * + * @param loginUser login user + * @param projectName project name + * @param processId process definition id + * @return copy result code + */ + @ApiOperation(value = "copyProcessDefinition", notes= "COPY_PROCESS_DEFINITION_NOTES") + @ApiImplicitParams({ + @ApiImplicitParam(name = "processId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100") + }) + @PostMapping(value = "/copy") + @ResponseStatus(HttpStatus.OK) + @ApiException(COPY_PROCESS_DEFINITION_ERROR) + public Result copyProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, + @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName, + @RequestParam(value = "processId", required = true) int processId) throws JsonProcessingException { + logger.info("copy process definition, login user:{}, project name:{}, process definition id:{}", + loginUser.getUserName(), projectName, processId); + Map result = processDefinitionService.copyProcessDefinition(loginUser, projectName, processId); + return returnDataList(result); + } + /** * verify process definition name unique * diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java index 00665aae71..8c52dd4d50 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java @@ -168,15 +168,13 @@ public enum Status { PREVIEW_SCHEDULE_ERROR(10139,"preview schedule error", "预览调度配置错误"), PARSE_TO_CRON_EXPRESSION_ERROR(10140,"parse cron to cron expression error", "解析调度表达式错误"), SCHEDULE_START_TIME_END_TIME_SAME(10141,"The start time must not be the same as the end", "开始时间不能和结束时间一样"), - DELETE_TENANT_BY_ID_FAIL(100142,"delete tenant by id fail, for 
there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"), - DELETE_TENANT_BY_ID_FAIL_DEFINES(100143,"delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"), - DELETE_TENANT_BY_ID_FAIL_USERS(100144,"delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"), - - DELETE_WORKER_GROUP_BY_ID_FAIL(100145,"delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"), - - QUERY_WORKER_GROUP_FAIL(100146,"query worker group fail ", "查询worker分组失败"), - DELETE_WORKER_GROUP_FAIL(100147,"delete worker group fail ", "删除worker分组失败"), - + DELETE_TENANT_BY_ID_FAIL(10142,"delete tenant by id fail, for there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"), + DELETE_TENANT_BY_ID_FAIL_DEFINES(10143,"delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"), + DELETE_TENANT_BY_ID_FAIL_USERS(10144,"delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"), + DELETE_WORKER_GROUP_BY_ID_FAIL(10145,"delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"), + QUERY_WORKER_GROUP_FAIL(10146,"query worker group fail ", "查询worker分组失败"), + DELETE_WORKER_GROUP_FAIL(10147,"delete worker group fail ", "删除worker分组失败"), + COPY_PROCESS_DEFINITION_ERROR(10148,"copy process definition error", "复制工作流错误"), UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"), UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"), diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ServiceException.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ServiceException.java new file mode 100644 index 0000000000..5669e6c3df --- /dev/null +++ 
b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ServiceException.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.dolphinscheduler.api.exceptions; + +import org.apache.dolphinscheduler.api.enums.Status; + + +/** + * service exception + */ +public class ServiceException extends RuntimeException { + + /** + * code + */ + private Integer code; + + public ServiceException() { + } + + public ServiceException(Status status) { + super(status.getMsg()); + this.code = status.getCode(); + } + + public ServiceException(Integer code,String message) { + super(message); + this.code = code; + } + + public ServiceException(String message) { + super(message); + } + + public Integer getCode() { + return this.code; + } + + public void setCode(Integer code) { + this.code = code; + } +} \ No newline at end of file diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java index 91316af455..2f44dee304 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java +++ 
b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java @@ -65,25 +65,24 @@ public class LoggerService { TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId); - if (taskInstance == null){ - return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg()); - } - - String host = Host.of(taskInstance.getHost()).getIp(); - if(StringUtils.isEmpty(host)){ + if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){ return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg()); } + String host = getHost(taskInstance.getHost()); Result result = new Result(Status.SUCCESS.getCode(), Status.SUCCESS.getMsg()); logger.info("log host : {} , logPath : {} , logServer port : {}",host,taskInstance.getLogPath(),Constants.RPC_PORT); + String log = logClient.rollViewLog(host, Constants.RPC_PORT, taskInstance.getLogPath(),skipLineNum,limit); result.setData(log); - logger.info(log); return result; } + + + /** * get log size * @@ -92,10 +91,24 @@ public class LoggerService { */ public byte[] getLogBytes(int taskInstId) { TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId); - if (taskInstance == null){ - throw new RuntimeException("task instance is null"); + if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){ + throw new RuntimeException("task instance is null or host is null"); } - String host = Host.of(taskInstance.getHost()).getIp(); + String host = getHost(taskInstance.getHost()); + return logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath()); } + + + /** + * get host + * @param address address + * @return old version return true ,otherwise return false + */ + private String getHost(String address){ + if (Host.isOldVersion(address)){ + return address; + } + return Host.of(address).getIp(); + } } diff --git 
a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java index 4081cab732..14cadbf189 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java @@ -112,8 +112,13 @@ public class ProcessDefinitionService extends BaseDAGService { * @return create result code * @throws JsonProcessingException JsonProcessingException */ - public Map createProcessDefinition(User loginUser, String projectName, String name, - String processDefinitionJson, String desc, String locations, String connects) throws JsonProcessingException { + public Map createProcessDefinition(User loginUser, + String projectName, + String name, + String processDefinitionJson, + String desc, + String locations, + String connects) throws JsonProcessingException { Map result = new HashMap<>(5); Project project = projectMapper.queryByName(projectName); @@ -281,6 +286,41 @@ public class ProcessDefinitionService extends BaseDAGService { return result; } + /** + * copy process definition + * + * @param loginUser login user + * @param projectName project name + * @param processId process definition id + * @return copy result code + */ + public Map copyProcessDefinition(User loginUser, String projectName, Integer processId) throws JsonProcessingException{ + + Map result = new HashMap<>(5); + Project project = projectMapper.queryByName(projectName); + + Map checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName); + Status resultStatus = (Status) checkResult.get(Constants.STATUS); + if (resultStatus != Status.SUCCESS) { + return checkResult; + } + + ProcessDefinition processDefinition = processDefineMapper.selectById(processId); + if (processDefinition == null) { + 
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId); + return result; + } else { + return createProcessDefinition( + loginUser, + projectName, + processDefinition.getName()+"_copy_"+System.currentTimeMillis(), + processDefinition.getProcessDefinitionJson(), + processDefinition.getDescription(), + processDefinition.getLocations(), + processDefinition.getConnects()); + } + } + /** * update process definition * diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java index c98b7c31b9..8f3075476e 100644 --- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java +++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java @@ -26,6 +26,7 @@ import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter; import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor; import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor; import org.apache.dolphinscheduler.api.enums.Status; +import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; @@ -44,8 +45,10 @@ import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; +import java.io.IOException; import java.text.MessageFormat; import java.util.*; +import java.util.regex.Matcher; import java.util.stream.Collectors; import static org.apache.dolphinscheduler.common.Constants.*; @@ -234,9 +237,6 @@ public class ResourcesService extends BaseService { } Date now = new Date(); - - - Resource resource = new 
Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now); try { @@ -317,7 +317,6 @@ public class ResourcesService extends BaseService { return result; } - if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) { putMsg(result, Status.SUCCESS); return result; @@ -325,9 +324,10 @@ public class ResourcesService extends BaseService { //check resource aleady exists String originFullName = resource.getFullName(); + String originResourceName = resource.getAlias(); String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name); - if (!resource.getAlias().equals(name) && checkResourceExists(fullName, 0, type.ordinal())) { + if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) { logger.error("resource {} already exists, can't recreate", name); putMsg(result, Status.RESOURCE_EXIST); return result; @@ -338,11 +338,24 @@ public class ResourcesService extends BaseService { if (StringUtils.isEmpty(tenantCode)){ return result; } + // verify whether the resource exists in storage + // get the path of origin file in storage + String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName); + try { + if (!HadoopUtils.getInstance().exists(originHdfsFileName)) { + logger.error("{} not exist", originHdfsFileName); + putMsg(result,Status.RESOURCE_NOT_EXIST); + return result; + } + } catch (IOException e) { + logger.error(e.getMessage(),e); + throw new ServiceException(Status.HDFS_OPERATION_ERROR); + } + String nameWithSuffix = name; - String originResourceName = resource.getAlias(); + if (!resource.isDirectory()) { //get the file suffix - String suffix = originResourceName.substring(originResourceName.lastIndexOf(".")); //if the name without suffix then add it ,else use the origin name @@ -352,7 +365,7 @@ public class ResourcesService extends BaseService { } // updateResource data - List 
childrenResource = listAllChildren(resource); + List childrenResource = listAllChildren(resource,false); String oldFullName = resource.getFullName(); Date now = new Date(); @@ -364,10 +377,11 @@ public class ResourcesService extends BaseService { try { resourcesMapper.updateById(resource); if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) { + String matcherFullName = Matcher.quoteReplacement(fullName); List childResourceList = new ArrayList<>(); List resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()])); childResourceList = resourceList.stream().map(t -> { - t.setFullName(t.getFullName().replaceFirst(oldFullName, fullName)); + t.setFullName(t.getFullName().replaceFirst(oldFullName, matcherFullName)); t.setUpdateTime(now); return t; }).collect(Collectors.toList()); @@ -385,29 +399,24 @@ public class ResourcesService extends BaseService { result.setData(resultMap); } catch (Exception e) { logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e); - throw new RuntimeException(Status.UPDATE_RESOURCE_ERROR.getMsg()); + throw new ServiceException(Status.UPDATE_RESOURCE_ERROR); } // if name unchanged, return directly without moving on HDFS if (originResourceName.equals(name)) { return result; } - // get file hdfs path - // delete hdfs file by type - String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName); + // get the path of dest file in hdfs String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName); + try { - if (HadoopUtils.getInstance().exists(originHdfsFileName)) { - logger.info("hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName); - HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true); - } else { - logger.error("{} not exist", originHdfsFileName); - putMsg(result,Status.RESOURCE_NOT_EXIST); - } + logger.info("start hdfs copy {} -> {}", originHdfsFileName, 
destHdfsFileName); + HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true); } catch (Exception e) { logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e); putMsg(result,Status.HDFS_COPY_FAIL); + throw new ServiceException(Status.HDFS_COPY_FAIL); } return result; @@ -542,34 +551,6 @@ public class ResourcesService extends BaseService { return result; } - /** - * get all resources - * @param loginUser login user - * @return all resource set - */ - /*private Set getAllResources(User loginUser, ResourceType type) { - int userId = loginUser.getId(); - boolean listChildren = true; - if(isAdmin(loginUser)){ - userId = 0; - listChildren = false; - } - List resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal()); - Set allResourceList = new HashSet<>(resourceList); - if (listChildren) { - Set authorizedIds = new HashSet<>(); - List authorizedDirecoty = resourceList.stream().filter(t->t.getUserId() != loginUser.getId() && t.isDirectory()).collect(Collectors.toList()); - if (CollectionUtils.isNotEmpty(authorizedDirecoty)) { - for(Resource resource : authorizedDirecoty){ - authorizedIds.addAll(listAllChildren(resource)); - } - List childrenResources = resourcesMapper.listResourceByIds(authorizedIds.toArray(new Integer[authorizedIds.size()])); - allResourceList.addAll(childrenResources); - } - } - return allResourceList; - }*/ - /** * query resource list * @@ -580,8 +561,11 @@ public class ResourcesService extends BaseService { public Map queryResourceJarList(User loginUser, ResourceType type) { Map result = new HashMap<>(5); - - List allResourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), type.ordinal(),0); + int userId = loginUser.getId(); + if(isAdmin(loginUser)){ + userId = 0; + } + List allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0); List resources = new ResourceFilter(".jar",new 
ArrayList<>(allResourceList)).filter(); Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources); result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren()); @@ -631,7 +615,7 @@ public class ResourcesService extends BaseService { Map> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list); Set resourceIdSet = resourceProcessMap.keySet(); // get all children of the resource - List allChildren = listAllChildren(resource); + List allChildren = listAllChildren(resource,true); Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]); //if resource type is UDF,need check whether it is bound by UDF functon @@ -1193,12 +1177,13 @@ public class ResourcesService extends BaseService { /** * list all children id - * @param resource resource + * @param resource resource + * @param containSelf whether add self to children list * @return all children id */ - List listAllChildren(Resource resource){ + List listAllChildren(Resource resource,boolean containSelf){ List childList = new ArrayList<>(); - if (resource.getId() != -1) { + if (resource.getId() != -1 && containSelf) { childList.add(resource.getId()); } diff --git a/dolphinscheduler-api/src/main/resources/i18n/messages.properties b/dolphinscheduler-api/src/main/resources/i18n/messages.properties index c4ca13168d..369e5e3c72 100644 --- a/dolphinscheduler-api/src/main/resources/i18n/messages.properties +++ b/dolphinscheduler-api/src/main/resources/i18n/messages.properties @@ -173,6 +173,7 @@ PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id +COPY_PROCESS_DEFINITION_NOTES=copy process definition QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging 
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list diff --git a/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties b/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties index e0c1c286d1..92df742613 100644 --- a/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties +++ b/dolphinscheduler-api/src/main/resources/i18n/messages_en_US.properties @@ -173,6 +173,7 @@ PROCESS_DEFINITION_ID=process definition id PROCESS_DEFINITION_IDS=process definition ids RELEASE_PROCESS_DEFINITION_NOTES=release process definition QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id +COPY_PROCESS_DEFINITION_NOTES=copy process definition QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list diff --git a/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties b/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties index af7fde5068..3b427912b5 100644 --- a/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties +++ b/dolphinscheduler-api/src/main/resources/i18n/messages_zh_CN.properties @@ -171,6 +171,7 @@ UPDATE_PROCESS_DEFINITION_NOTES=更新流程定义 PROCESS_DEFINITION_ID=流程定义ID RELEASE_PROCESS_DEFINITION_NOTES=发布流程定义 QUERY_PROCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID +COPY_PROCESS_DEFINITION_NOTES=复制流程定义 QUERY_PROCESS_DEFINITION_LIST_NOTES=查询流程定义列表 QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表 QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义 diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java index c028dd4167..a69df9744e 100644 --- 
a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java @@ -174,6 +174,21 @@ public class ProcessDefinitionControllerTest{ Assert.assertEquals(Status.SUCCESS.getCode(),response.getCode().intValue()); } + @Test + public void testCopyProcessDefinition() throws Exception { + + String projectName = "test"; + int id = 1; + + Map result = new HashMap<>(5); + putMsg(result, Status.SUCCESS); + + Mockito.when(processDefinitionService.copyProcessDefinition(user, projectName,id)).thenReturn(result); + Result response = processDefinitionController.copyProcessDefinition(user, projectName,id); + + Assert.assertEquals(Status.SUCCESS.getCode(),response.getCode().intValue()); + } + @Test public void testQueryProcessDefinitionList() throws Exception { diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/exceptions/ServiceExceptionTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/exceptions/ServiceExceptionTest.java new file mode 100644 index 0000000000..a574253d1d --- /dev/null +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/exceptions/ServiceExceptionTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.dolphinscheduler.api.exceptions; + +import org.apache.dolphinscheduler.api.enums.Status; +import org.junit.Assert; +import org.junit.Test; + +public class ServiceExceptionTest { + @Test + public void getCodeTest(){ + ServiceException serviceException = new ServiceException(); + Assert.assertNull(serviceException.getCode()); + + serviceException = new ServiceException(Status.ALERT_GROUP_EXIST); + Assert.assertNotNull(serviceException.getCode()); + + serviceException = new ServiceException(10012, "alarm group already exists"); + Assert.assertNotNull(serviceException.getCode()); + } + @Test + public void getMessageTest(){ + ServiceException serviceException = new ServiceException(); + Assert.assertNull(serviceException.getMessage()); + + serviceException = new ServiceException(Status.ALERT_GROUP_EXIST); + Assert.assertNotNull(serviceException.getMessage()); + + serviceException = new ServiceException(10012, "alarm group already exists"); + Assert.assertNotNull(serviceException.getMessage()); + } +} diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java index a0da2289dc..5a03cdb268 100644 --- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java @@ -198,6 +198,47 @@ public class 
ProcessDefinitionServiceTest { Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS)); } + @Test + public void testCopyProcessDefinition() throws Exception{ + String projectName = "project_test1"; + Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName)); + + Project project = getProject(projectName); + + User loginUser = new User(); + loginUser.setId(-1); + loginUser.setUserType(UserType.GENERAL_USER); + + Map result = new HashMap<>(5); + //project check auth success, instance not exist + putMsg(result, Status.SUCCESS, projectName); + Mockito.when(projectService.checkProjectAndAuth(loginUser,project,projectName)).thenReturn(result); + + ProcessDefinition definition = getProcessDefinition(); + definition.setLocations("{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}"); + definition.setProcessDefinitionJson("{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\",\"name\":\"ssh_test1\",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234\\\"\\necho ${aa}\"},\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\",\"retryInterval\":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},\"taskInstancePriority\":\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}"); + definition.setConnects("[]"); + //instance exit + Mockito.when(processDefineMapper.selectById(46)).thenReturn(definition); + + Map createProcessResult = new HashMap<>(5); + putMsg(result, Status.SUCCESS); + + Mockito.when(processDefinitionService.createProcessDefinition( + loginUser, + definition.getProjectName(), + definition.getName(), + definition.getProcessDefinitionJson(), + definition.getDescription(), + definition.getLocations(), + definition.getConnects())).thenReturn(createProcessResult); + + Map successRes = processDefinitionService.copyProcessDefinition(loginUser, + "project_test1", 46); + + 
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS)); + } + @Test public void deleteProcessDefinitionByIdTest() throws Exception { String projectName = "project_test1"; @@ -770,12 +811,14 @@ public class ProcessDefinitionServiceTest { * @return ProcessDefinition */ private ProcessDefinition getProcessDefinition(){ + ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setId(46); processDefinition.setName("test_pdf"); processDefinition.setProjectId(2); processDefinition.setTenantId(1); processDefinition.setDescription(""); + return processDefinition; } diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java index 4f9176d699..e52f4670e2 100644 --- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java +++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java @@ -19,12 +19,16 @@ package org.apache.dolphinscheduler.api.service; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import org.apache.dolphinscheduler.api.enums.Status; +import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.UserType; -import org.apache.dolphinscheduler.common.utils.*; +import org.apache.dolphinscheduler.common.utils.CollectionUtils; +import org.apache.dolphinscheduler.common.utils.FileUtils; +import org.apache.dolphinscheduler.common.utils.HadoopUtils; +import org.apache.dolphinscheduler.common.utils.PropertyUtils; import 
org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; @@ -37,7 +41,6 @@ import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; -import org.omg.CORBA.Any; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; @@ -172,10 +175,29 @@ public class ResourcesServiceTest { logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getMsg(),result.getMsg()); + //RESOURCE_NOT_EXIST + user.setId(1); + Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser()); + Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); + PowerMockito.when(HadoopUtils.getHdfsFileName(Mockito.any(), Mockito.any(),Mockito.anyString())).thenReturn("test1"); + + try { + Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(false); + } catch (IOException e) { + logger.error(e.getMessage(),e); + } + result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF); + Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg()); + //SUCCESS user.setId(1); Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser()); Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); + try { + Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(true); + } catch (IOException e) { + logger.error(e.getMessage(),e); + } result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE); logger.info(result.toString()); @@ -199,21 +221,16 @@ public class ResourcesServiceTest { logger.info(result.toString()); Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg()); - //RESOURCE_NOT_EXIST - 
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); - PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test1"); + //SUCCESS + Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); + PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test"); try { - Mockito.when(hadoopUtils.exists("test")).thenReturn(true); - } catch (IOException e) { - e.printStackTrace(); + PowerMockito.when(HadoopUtils.getInstance().copy(Mockito.anyString(),Mockito.anyString(),true,true)).thenReturn(true); + } catch (Exception e) { + logger.error(e.getMessage(),e); } - result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF); - logger.info(result.toString()); - Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg()); - //SUCCESS - PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test"); result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg()); diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java index 02f00ce330..963aff5f31 100644 --- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java +++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java @@ -503,6 +503,9 @@ public class HadoopUtils implements Closeable { * @return hdfs file name */ public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) { + if (fileName.startsWith("/")) { + fileName = fileName.replaceFirst("/",""); + } return String.format("%s/%s", 
getHdfsDir(resourceType,tenantCode), fileName); } @@ -514,6 +517,9 @@ public class HadoopUtils implements Closeable { * @return get absolute path and name for file on hdfs */ public static String getHdfsResourceFileName(String tenantCode, String fileName) { + if (fileName.startsWith("/")) { + fileName = fileName.replaceFirst("/",""); + } return String.format("%s/%s", getHdfsResDir(tenantCode), fileName); } @@ -525,6 +531,9 @@ public class HadoopUtils implements Closeable { * @return get absolute path and name for udf file on hdfs */ public static String getHdfsUdfFileName(String tenantCode, String fileName) { + if (fileName.startsWith("/")) { + fileName = fileName.replaceFirst("/",""); + } return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName); } diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java index 00b8f1c5c6..e239fe7cb0 100644 --- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java +++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java @@ -127,6 +127,18 @@ public class HadoopUtilsTest { Assert.assertEquals("/dolphinscheduler/11000/resources/aa.txt", result); } + @Test + public void getHdfsResourceFileName() { + String result = hadoopUtils.getHdfsResourceFileName("11000","aa.txt"); + Assert.assertEquals("/dolphinscheduler/11000/resources/aa.txt", result); + } + + @Test + public void getHdfsUdfFileName() { + String result = hadoopUtils.getHdfsFileName(ResourceType.UDF,"11000","aa.txt"); + Assert.assertEquals("/dolphinscheduler/11000/udfs/aa.txt", result); + } + @Test public void isYarnEnabled() { boolean result = hadoopUtils.isYarnEnabled(); diff --git a/dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Host.java 
b/dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Host.java index 0d58227530..e9eaabcad6 100644 --- a/dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Host.java +++ b/dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Host.java @@ -91,6 +91,16 @@ public class Host implements Serializable { return host; } + /** + * whether old version + * @param address address + * @return old version is true , otherwise is false + */ + public static Boolean isOldVersion(String address){ + String[] parts = address.split(":"); + return parts.length != 2 ? true : false; + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java index 6df82b5a60..d86374244f 100644 --- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java +++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java @@ -117,9 +117,12 @@ public class MasterServer { this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_RESPONSE, new TaskKillResponseProcessor()); this.nettyRemotingServer.start(); - // - this.zkMasterClient.start(); + // register this.masterRegistry.registry(); + + // self tolerant + this.zkMasterClient.start(); + // masterSchedulerService.start(); diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java index cdd9ff2219..3314789fdb 100644 --- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java +++ 
b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java @@ -48,13 +48,13 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import javax.annotation.PostConstruct; -import java.util.HashSet; +import java.util.ArrayList; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.apache.dolphinscheduler.common.Constants.*; +import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS; /** * TaskUpdateQueue consumer @@ -120,19 +120,31 @@ public class TaskPriorityQueueConsumer extends Thread{ Boolean result = false; while (Stopper.isRunning()){ try { - result = dispatcher.dispatch(executionContext); + result = dispatcher.dispatch(executionContext); } catch (ExecuteException e) { logger.error("dispatch error",e); ThreadUtils.sleep(SLEEP_TIME_MILLIS); } - if (result){ + if (result || taskInstanceIsFinalState(taskInstanceId)){ break; } } return result; } + + /** + * taskInstance is final state + * success,failure,kill,stop,pause,threadwaiting is final state + * @param taskInstanceId taskInstanceId + * @return taskInstance is final state + */ + public Boolean taskInstanceIsFinalState(int taskInstanceId){ + TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId); + return taskInstance.getState().typeIsFinished(); + } + /** * get TaskExecutionContext * @param taskInstanceId taskInstanceId @@ -328,36 +340,38 @@ public class TaskPriorityQueueConsumer extends Thread{ return false; } - /** - * create project resource files + * get resource full name list */ - private List getResourceFullNames(TaskNode taskNode){ - - Set resourceIdsSet = new HashSet<>(); + private List getResourceFullNames(TaskNode taskNode) { + List resourceFullNameList = new ArrayList<>(); AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), 
taskNode.getParams()); if (baseParam != null) { List projectResourceFiles = baseParam.getResourceFilesList(); if (projectResourceFiles != null) { - Stream resourceInfotream = projectResourceFiles.stream().map(resourceInfo -> resourceInfo.getId()); - resourceIdsSet.addAll(resourceInfotream.collect(Collectors.toSet())); - } - } + // filter the resources that the resource id equals 0 + Set oldVersionResources = projectResourceFiles.stream().filter(t -> t.getId() == 0).collect(Collectors.toSet()); + if (CollectionUtils.isNotEmpty(oldVersionResources)) { + resourceFullNameList.addAll(oldVersionResources.stream().map(resource -> resource.getRes()).collect(Collectors.toSet())); + } - if (CollectionUtils.isEmpty(resourceIdsSet)){ - return null; - } + // get the resource id in order to get the resource names in batch + Stream resourceIdStream = projectResourceFiles.stream().map(resourceInfo -> resourceInfo.getId()); + Set resourceIdsSet = resourceIdStream.collect(Collectors.toSet()); - Integer[] resourceIds = resourceIdsSet.toArray(new Integer[resourceIdsSet.size()]); + if (CollectionUtils.isNotEmpty(resourceIdsSet)) { + Integer[] resourceIds = resourceIdsSet.toArray(new Integer[resourceIdsSet.size()]); - List resources = processService.listResourceByIds(resourceIds); - - List resourceFullNames = resources.stream() - .map(resourceInfo -> resourceInfo.getFullName()) - .collect(Collectors.toList()); + List resources = processService.listResourceByIds(resourceIds); + resourceFullNameList.addAll(resources.stream() + .map(resourceInfo -> resourceInfo.getFullName()) + .collect(Collectors.toList())); + } + } + } - return resourceFullNames; + return resourceFullNameList; } } diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java index 9986b07319..105584fe99 100644 --- 
a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java +++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java @@ -26,6 +26,7 @@ import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.thread.Stopper; +import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.remote.command.TaskKillRequestCommand; @@ -35,9 +36,12 @@ import org.apache.dolphinscheduler.server.master.cache.impl.TaskInstanceCacheMan import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext; import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType; import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; +import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; +import org.springframework.beans.factory.annotation.Autowired; import java.util.Date; +import java.util.Set; /** @@ -53,6 +57,12 @@ public class MasterTaskExecThread extends MasterBaseTaskExecThread { private NettyExecutorManager nettyExecutorManager; + + /** + * zookeeper register center + */ + private ZookeeperRegistryCenter zookeeperRegistryCenter; + /** * constructor of MasterTaskExecThread * @param taskInstance task instance @@ -61,6 +71,7 @@ public class MasterTaskExecThread extends MasterBaseTaskExecThread { super(taskInstance); this.taskInstanceCacheManager = SpringApplicationContext.getBean(TaskInstanceCacheManagerImpl.class); this.nettyExecutorManager = 
SpringApplicationContext.getBean(NettyExecutorManager.class); + this.zookeeperRegistryCenter = SpringApplicationContext.getBean(ZookeeperRegistryCenter.class); } /** @@ -175,6 +186,16 @@ public class MasterTaskExecThread extends MasterBaseTaskExecThread { } alreadyKilled = true; + String taskInstanceWorkerGroup = taskInstance.getWorkerGroup(); + + // not exists + if (!existsValidWorkerGroup(taskInstanceWorkerGroup)){ + taskInstance.setState(ExecutionStatus.KILL); + taskInstance.setEndTime(new Date()); + processService.updateTaskInstance(taskInstance); + return; + } + TaskKillRequestCommand killCommand = new TaskKillRequestCommand(); killCommand.setTaskInstanceId(taskInstance.getId()); @@ -185,10 +206,33 @@ public class MasterTaskExecThread extends MasterBaseTaskExecThread { nettyExecutorManager.executeDirectly(executionContext); - logger.info("master add kill task :{} id:{} to kill queue", + logger.info("master kill taskInstance name :{} taskInstance id:{}", taskInstance.getName(), taskInstance.getId() ); } + /** + * whether exists valid worker group + * @param taskInstanceWorkerGroup taskInstanceWorkerGroup + * @return whether exists + */ + public Boolean existsValidWorkerGroup(String taskInstanceWorkerGroup){ + Set workerGroups = zookeeperRegistryCenter.getWorkerGroupDirectly(); + // not worker group + if (CollectionUtils.isEmpty(workerGroups)){ + return false; + } + + // has worker group , but not taskInstance assigned worker group + if (!workerGroups.contains(taskInstanceWorkerGroup)){ + return false; + } + Set workers = zookeeperRegistryCenter.getWorkerGroupNodesDirectly(taskInstanceWorkerGroup); + if (CollectionUtils.isEmpty(workers)) { + return false; + } + return true; + } + /** * get task timeout parameter * @return TaskTimeoutParameter diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java 
index 22fa91dc1d..84e4e54a50 100644 --- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java +++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java @@ -16,7 +16,6 @@ */ package org.apache.dolphinscheduler.server.worker.task.sql; -import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; import com.alibaba.fastjson.serializer.SerializerFeature; @@ -24,7 +23,6 @@ import org.apache.commons.lang.StringUtils; import org.apache.dolphinscheduler.alert.utils.MailUtils; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.*; -import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.DbType; import org.apache.dolphinscheduler.common.enums.ShowType; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; @@ -37,7 +35,6 @@ import org.apache.dolphinscheduler.common.utils.*; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.dao.datasource.BaseDataSource; import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory; -import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; @@ -78,6 +75,10 @@ public class SqlTask extends AbstractTask { */ private TaskExecutionContext taskExecutionContext; + /** + * default query sql limit + */ + private static final int LIMIT = 10000; public SqlTask(TaskExecutionContext taskExecutionContext, Logger logger) { super(taskExecutionContext, logger); @@ -257,12 +258,15 @@ public class SqlTask extends AbstractTask { ResultSetMetaData md = resultSet.getMetaData(); int num = md.getColumnCount(); - while (resultSet.next()) { + int rowCount = 0; + 
+ while (rowCount < LIMIT && resultSet.next()) { JSONObject mapOfColValues = new JSONObject(true); for (int i = 1; i <= num; i++) { mapOfColValues.put(md.getColumnName(i), resultSet.getObject(i)); } resultJSONArray.add(mapOfColValues); + rowCount++; } logger.debug("execute sql : {}", JSONObject.toJSONString(resultJSONArray, SerializerFeature.WriteMapNullValue)); diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java index 46f48b6d76..69aecee444 100644 --- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java +++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java @@ -24,6 +24,7 @@ import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.ZKNodeType; import org.apache.dolphinscheduler.common.model.Server; +import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.TaskInstance; @@ -40,6 +41,8 @@ import org.springframework.stereotype.Component; import java.util.Date; import java.util.List; +import static org.apache.dolphinscheduler.common.Constants.*; + /** * zookeeper master client @@ -72,8 +75,13 @@ public class ZKMasterClient extends AbstractZKClient { // init system znode this.initSystemZNode(); - // check if fault tolerance is required?failure and tolerance - if (getActiveMasterNum() == 1 && checkZKNodeExists(OSUtils.getHost(), ZKNodeType.MASTER)) { + while (!checkZKNodeExists(OSUtils.getHost(), ZKNodeType.MASTER)){ + ThreadUtils.sleep(SLEEP_TIME_MILLIS); + } + + + // self tolerant + if (getActiveMasterNum() == 1) { failoverWorker(null, true); 
failoverMaster(null); } @@ -147,7 +155,7 @@ public class ZKMasterClient extends AbstractZKClient { * @throws Exception exception */ private void failoverServerWhenDown(String serverHost, ZKNodeType zkNodeType) throws Exception { - if(StringUtils.isEmpty(serverHost)){ + if(StringUtils.isEmpty(serverHost) || serverHost.startsWith(OSUtils.getHost())){ return ; } switch (zkNodeType){ diff --git a/dolphinscheduler-server/src/main/resources/config/install_config.conf b/dolphinscheduler-server/src/main/resources/config/install_config.conf index 4671be7371..cba117e048 100644 --- a/dolphinscheduler-server/src/main/resources/config/install_config.conf +++ b/dolphinscheduler-server/src/main/resources/config/install_config.conf @@ -63,7 +63,8 @@ mailPassword="xxxxxxxxxx" # TLS mail protocol support starttlsEnable="false" -sslTrust="xxxxxxxxxx" +#note: sslTrust is the same as mailServerHost +sslTrust="smtp.exmail.qq.com" # SSL mail protocol support # note: The SSL protocol is enabled by default. diff --git a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java new file mode 100644 index 0000000000..b6f118a734 --- /dev/null +++ b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java @@ -0,0 +1,262 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.dolphinscheduler.server.master.consumer; + +import org.apache.dolphinscheduler.common.enums.CommandType; +import org.apache.dolphinscheduler.common.enums.DbType; +import org.apache.dolphinscheduler.common.enums.ExecutionStatus; +import org.apache.dolphinscheduler.common.enums.Priority; +import org.apache.dolphinscheduler.dao.entity.*; +import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher; +import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; +import org.apache.dolphinscheduler.server.registry.DependencyConfig; +import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager; +import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter; +import org.apache.dolphinscheduler.server.zk.SpringZKServer; +import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; +import org.apache.dolphinscheduler.service.process.ProcessService; +import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue; +import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl; +import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator; +import org.apache.dolphinscheduler.service.zk.ZookeeperConfig; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.util.Date; + 
+ +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(classes={DependencyConfig.class, SpringApplicationContext.class, SpringZKServer.class, + NettyExecutorManager.class, ExecutorDispatcher.class, ZookeeperRegistryCenter.class, TaskPriorityQueueConsumer.class, + ZookeeperNodeManager.class, ZookeeperCachedOperator.class, ZookeeperConfig.class}) +public class TaskPriorityQueueConsumerTest { + + + @Autowired + private TaskPriorityQueue taskPriorityQueue; + + @Autowired + private TaskPriorityQueueConsumer taskPriorityQueueConsumer; + + @Autowired + private ProcessService processService; + + @Autowired + private ExecutorDispatcher dispatcher; + + @Before + public void init(){ + + Tenant tenant = new Tenant(); + tenant.setId(1); + tenant.setTenantCode("journey"); + tenant.setTenantName("journey"); + tenant.setDescription("journey"); + tenant.setQueueId(1); + tenant.setCreateTime(new Date()); + tenant.setUpdateTime(new Date()); + + Mockito.when(processService.getTenantForProcess(1,2)).thenReturn(tenant); + + Mockito.when(processService.queryUserQueueByProcessInstanceId(1)).thenReturn("default"); + } + + + @Test + public void testSHELLTask() throws Exception { + TaskInstance taskInstance = new TaskInstance(); + taskInstance.setId(1); + taskInstance.setTaskType("SHELL"); + taskInstance.setProcessDefinitionId(1); + taskInstance.setProcessInstanceId(1); + taskInstance.setState(ExecutionStatus.KILL); + taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-55201\",\"maxRetryTimes\":0,\"name\":\"测试任务\",\"params\":\"{\\\"rawScript\\\":\\\"echo 
\\\\\\\"测试任务\\\\\\\"\\\",\\\"localParams\\\":[],\\\"resourceList\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}"); + taskInstance.setProcessInstancePriority(Priority.MEDIUM); + taskInstance.setWorkerGroup("default"); + taskInstance.setExecutorId(2); + + ProcessInstance processInstance = new ProcessInstance(); + processInstance.setTenantId(1); + processInstance.setCommandType(CommandType.START_PROCESS); + taskInstance.setProcessInstance(processInstance); + + ProcessDefinition processDefinition = new ProcessDefinition(); + processDefinition.setUserId(2); + processDefinition.setProjectId(1); + taskInstance.setProcessDefine(processDefinition); + + Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance); + taskPriorityQueue.put("2_1_2_1_default"); + + Thread.sleep(10000); + } + + + @Test + public void testSQLTask() throws Exception { + TaskInstance taskInstance = new TaskInstance(); + taskInstance.setId(1); + taskInstance.setTaskType("SQL"); + taskInstance.setProcessDefinitionId(1); + taskInstance.setProcessInstanceId(1); + taskInstance.setState(ExecutionStatus.KILL); + taskInstance.setTaskJson("{\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-3655\",\"maxRetryTimes\":0,\"name\":\"UDF测试\",\"params\":\"{\\\"postStatements\\\":[],\\\"connParams\\\":\\\"\\\",\\\"receiversCc\\\":\\\"\\\",\\\"udfs\\\":\\\"1\\\",\\\"type\\\":\\\"HIVE\\\",\\\"title\\\":\\\"test\\\",\\\"sql\\\":\\\"select id,name,ds,zodia(ds) from 
t_journey_user\\\",\\\"preStatements\\\":[],\\\"sqlType\\\":0,\\\"receivers\\\":\\\"825193156@qq.com\\\",\\\"datasource\\\":3,\\\"showType\\\":\\\"TABLE\\\",\\\"localParams\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SQL\"}"); + taskInstance.setProcessInstancePriority(Priority.MEDIUM); + taskInstance.setWorkerGroup("default"); + taskInstance.setExecutorId(2); + + ProcessInstance processInstance = new ProcessInstance(); + processInstance.setTenantId(1); + processInstance.setCommandType(CommandType.START_PROCESS); + taskInstance.setProcessInstance(processInstance); + + ProcessDefinition processDefinition = new ProcessDefinition(); + processDefinition.setUserId(2); + processDefinition.setProjectId(1); + taskInstance.setProcessDefine(processDefinition); + + Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance); + taskPriorityQueue.put("2_1_2_1_default"); + + DataSource dataSource = new DataSource(); + dataSource.setId(1); + dataSource.setName("sqlDatasource"); + dataSource.setType(DbType.MYSQL); + dataSource.setUserId(2); + dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}"); + dataSource.setCreateTime(new Date()); + dataSource.setUpdateTime(new Date()); + + Mockito.when(processService.findDataSourceById(1)).thenReturn(dataSource); + + Thread.sleep(10000); + } + + + @Test + public void testDataxTask() throws Exception { + TaskInstance taskInstance = new TaskInstance(); + taskInstance.setId(1); + taskInstance.setTaskType("DATAX"); + taskInstance.setProcessDefinitionId(1); + taskInstance.setProcessInstanceId(1); + 
taskInstance.setState(ExecutionStatus.KILL); + taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-97625\",\"maxRetryTimes\":0,\"name\":\"MySQL数据相互导入\",\"params\":\"{\\\"targetTable\\\":\\\"pv2\\\",\\\"postStatements\\\":[],\\\"jobSpeedRecord\\\":1000,\\\"customConfig\\\":0,\\\"dtType\\\":\\\"MYSQL\\\",\\\"dsType\\\":\\\"MYSQL\\\",\\\"jobSpeedByte\\\":0,\\\"dataSource\\\":80,\\\"dataTarget\\\":80,\\\"sql\\\":\\\"SELECT dt,count FROM pv\\\",\\\"preStatements\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"DATAX\",\"workerGroup\":\"default\"}"); + taskInstance.setProcessInstancePriority(Priority.MEDIUM); + taskInstance.setWorkerGroup("default"); + taskInstance.setExecutorId(2); + + ProcessInstance processInstance = new ProcessInstance(); + processInstance.setTenantId(1); + processInstance.setCommandType(CommandType.START_PROCESS); + taskInstance.setProcessInstance(processInstance); + + ProcessDefinition processDefinition = new ProcessDefinition(); + processDefinition.setUserId(2); + processDefinition.setProjectId(1); + taskInstance.setProcessDefine(processDefinition); + + Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance); + taskPriorityQueue.put("2_1_2_1_default"); + + + + DataSource dataSource = new DataSource(); + dataSource.setId(80); + dataSource.setName("datax"); + dataSource.setType(DbType.MYSQL); + dataSource.setUserId(2); + dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}"); 
+ dataSource.setCreateTime(new Date()); + dataSource.setUpdateTime(new Date()); + + Mockito.when(processService.findDataSourceById(80)).thenReturn(dataSource); + + Thread.sleep(10000); + } + + + @Test + public void testSqoopTask() throws Exception { + TaskInstance taskInstance = new TaskInstance(); + taskInstance.setId(1); + taskInstance.setTaskType("SQOOP"); + taskInstance.setProcessDefinitionId(1); + taskInstance.setProcessInstanceId(1); + taskInstance.setState(ExecutionStatus.KILL); + taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-63634\",\"maxRetryTimes\":0,\"name\":\"MySQL数据导入HDSF\",\"params\":\"{\\\"sourceType\\\":\\\"MYSQL\\\",\\\"targetType\\\":\\\"HDFS\\\",\\\"targetParams\\\":\\\"{\\\\\\\"targetPath\\\\\\\":\\\\\\\"/test/datatest\\\\\\\",\\\\\\\"deleteTargetDir\\\\\\\":true,\\\\\\\"fileType\\\\\\\":\\\\\\\"--as-textfile\\\\\\\",\\\\\\\"compressionCodec\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"fieldsTerminated\\\\\\\":\\\\\\\",\\\\\\\",\\\\\\\"linesTerminated\\\\\\\":\\\\\\\"\\\\\\\\\\\\\\\\n\\\\\\\"}\\\",\\\"modelType\\\":\\\"import\\\",\\\"sourceParams\\\":\\\"{\\\\\\\"srcType\\\\\\\":\\\\\\\"MYSQL\\\\\\\",\\\\\\\"srcDatasource\\\\\\\":1,\\\\\\\"srcTable\\\\\\\":\\\\\\\"t_ds_user\\\\\\\",\\\\\\\"srcQueryType\\\\\\\":\\\\\\\"0\\\\\\\",\\\\\\\"srcQuerySql\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"srcColumnType\\\\\\\":\\\\\\\"0\\\\\\\",\\\\\\\"srcColumns\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"srcConditionList\\\\\\\":[],\\\\\\\"mapColumnHive\\\\\\\":[],\\\\\\\"mapColumnJava\\\\\\\":[]}\\\",\\\"localParams\\\":[],\\\"concurrency\\\":1}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SQOOP\",\"workerGroup\":\"default\"}"); + 
taskInstance.setProcessInstancePriority(Priority.MEDIUM); + taskInstance.setWorkerGroup("default"); + taskInstance.setExecutorId(2); + + ProcessInstance processInstance = new ProcessInstance(); + processInstance.setTenantId(1); + processInstance.setCommandType(CommandType.START_PROCESS); + taskInstance.setProcessInstance(processInstance); + + ProcessDefinition processDefinition = new ProcessDefinition(); + processDefinition.setUserId(2); + processDefinition.setProjectId(1); + taskInstance.setProcessDefine(processDefinition); + + Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance); + taskPriorityQueue.put("2_1_2_1_default"); + + + + DataSource dataSource = new DataSource(); + dataSource.setId(1); + dataSource.setName("datax"); + dataSource.setType(DbType.MYSQL); + dataSource.setUserId(2); + dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}"); + dataSource.setCreateTime(new Date()); + dataSource.setUpdateTime(new Date()); + + Mockito.when(processService.findDataSourceById(1)).thenReturn(dataSource); + + Thread.sleep(10000); + } + + + @Test + public void testTaskInstanceIsFinalState(){ + TaskInstance taskInstance = new TaskInstance(); + taskInstance.setId(1); + taskInstance.setTaskType("SHELL"); + taskInstance.setProcessDefinitionId(1); + taskInstance.setProcessInstanceId(1); + taskInstance.setState(ExecutionStatus.KILL); + taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-55201\",\"maxRetryTimes\":0,\"name\":\"测试任务\",\"params\":\"{\\\"rawScript\\\":\\\"echo 
\\\\\\\"测试任务\\\\\\\"\\\",\\\"localParams\\\":[],\\\"resourceList\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}"); + taskInstance.setProcessInstancePriority(Priority.MEDIUM); + taskInstance.setWorkerGroup("default"); + taskInstance.setExecutorId(2); + + + Mockito.when( processService.findTaskInstanceById(1)).thenReturn(taskInstance); + + taskPriorityQueueConsumer.taskInstanceIsFinalState(1); + } + + +} diff --git a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThreadTest.java b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThreadTest.java new file mode 100644 index 0000000000..ebddec4fb1 --- /dev/null +++ b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThreadTest.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.dolphinscheduler.server.master.runner; + +import junit.framework.Assert; +import org.apache.dolphinscheduler.server.master.config.MasterConfig; +import org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer; +import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher; +import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; +import org.apache.dolphinscheduler.server.registry.DependencyConfig; +import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager; +import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter; +import org.apache.dolphinscheduler.server.zk.SpringZKServer; +import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; +import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl; +import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator; +import org.apache.dolphinscheduler.service.zk.ZookeeperConfig; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import java.util.HashSet; +import java.util.Set; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(classes={DependencyConfig.class, SpringApplicationContext.class, SpringZKServer.class, + NettyExecutorManager.class, ExecutorDispatcher.class, ZookeeperRegistryCenter.class, TaskPriorityQueueConsumer.class, + ZookeeperNodeManager.class, ZookeeperCachedOperator.class, ZookeeperConfig.class, MasterConfig.class}) +public class MasterTaskExecThreadTest { + + @Test + public void testExistsValidWorkerGroup1(){ + ZookeeperRegistryCenter zookeeperRegistryCenter = Mockito.mock(ZookeeperRegistryCenter.class); + Mockito.when(zookeeperRegistryCenter.getWorkerGroupDirectly()).thenReturn(null); + MasterTaskExecThread masterTaskExecThread = new MasterTaskExecThread(null); 
+ masterTaskExecThread.existsValidWorkerGroup("default"); + } + @Test + public void testExistsValidWorkerGroup2(){ + ZookeeperRegistryCenter zookeeperRegistryCenter = Mockito.mock(ZookeeperRegistryCenter.class); + Set workerGorups = new HashSet<>(); + workerGorups.add("test1"); + workerGorups.add("test2"); + + Mockito.when(zookeeperRegistryCenter.getWorkerGroupDirectly()).thenReturn(workerGorups); + MasterTaskExecThread masterTaskExecThread = new MasterTaskExecThread(null); + masterTaskExecThread.existsValidWorkerGroup("default"); + } + + @Test + public void testExistsValidWorkerGroup3(){ + ZookeeperRegistryCenter zookeeperRegistryCenter = Mockito.mock(ZookeeperRegistryCenter.class); + Set workerGorups = new HashSet<>(); + workerGorups.add("test1"); + + Mockito.when(zookeeperRegistryCenter.getWorkerGroupDirectly()).thenReturn(workerGorups); + Mockito.when(zookeeperRegistryCenter.getWorkerGroupNodesDirectly("test1")).thenReturn(workerGorups); + MasterTaskExecThread masterTaskExecThread = new MasterTaskExecThread(null); + masterTaskExecThread.existsValidWorkerGroup("test1"); + } + + +} diff --git a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/registry/DependencyConfig.java b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/registry/DependencyConfig.java index 0adea44cfd..93d2b03010 100644 --- a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/registry/DependencyConfig.java +++ b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/registry/DependencyConfig.java @@ -20,11 +20,14 @@ package org.apache.dolphinscheduler.server.registry; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.dao.mapper.*; import org.apache.dolphinscheduler.server.master.cache.impl.TaskInstanceCacheManagerImpl; +import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher; import org.apache.dolphinscheduler.server.master.dispatch.host.HostManager; 
import org.apache.dolphinscheduler.server.master.dispatch.host.RandomHostManager; import org.apache.dolphinscheduler.server.master.processor.queue.TaskResponseService; import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService; import org.apache.dolphinscheduler.service.process.ProcessService; +import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue; +import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl; import org.mockito.Mockito; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @@ -144,4 +147,9 @@ public class DependencyConfig { public TaskResponseService taskResponseService(){ return Mockito.mock(TaskResponseService.class); } + + @Bean + public TaskPriorityQueue taskPriorityQueue(){ + return new TaskPriorityQueueImpl(); + } } diff --git a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java index a0fee7c36e..78ba3a6b44 100644 --- a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java +++ b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/processor/TaskCallbackServiceTest.java @@ -133,8 +133,6 @@ public class TaskCallbackServiceTest { nettyRemotingClient.close(); } - - @Test(expected = IllegalArgumentException.class) public void testSendAckWithIllegalArgumentException(){ TaskExecuteAckCommand ackCommand = Mockito.mock(TaskExecuteAckCommand.class); @@ -178,39 +176,40 @@ public class TaskCallbackServiceTest { } } - @Test(expected = IllegalStateException.class) - public void testSendAckWithIllegalStateException2(){ - masterRegistry.registry(); - final NettyServerConfig serverConfig = new NettyServerConfig(); - serverConfig.setListenPort(30000); - NettyRemotingServer 
nettyRemotingServer = new NettyRemotingServer(serverConfig); - nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor); - nettyRemotingServer.start(); - - final NettyClientConfig clientConfig = new NettyClientConfig(); - NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig); - Channel channel = nettyRemotingClient.getChannel(Host.of("localhost:30000")); - taskCallbackService.addRemoteChannel(1, new NettyRemoteChannel(channel, 1)); - channel.close(); - TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand(); - ackCommand.setTaskInstanceId(1); - ackCommand.setStartTime(new Date()); +// @Test(expected = IllegalStateException.class) +// public void testSendAckWithIllegalStateException2(){ +// masterRegistry.registry(); +// final NettyServerConfig serverConfig = new NettyServerConfig(); +// serverConfig.setListenPort(30000); +// NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(serverConfig); +// nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor); +// nettyRemotingServer.start(); +// +// final NettyClientConfig clientConfig = new NettyClientConfig(); +// NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig); +// Channel channel = nettyRemotingClient.getChannel(Host.of("localhost:30000")); +// taskCallbackService.addRemoteChannel(1, new NettyRemoteChannel(channel, 1)); +// channel.close(); +// TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand(); +// ackCommand.setTaskInstanceId(1); +// ackCommand.setStartTime(new Date()); +// +// nettyRemotingServer.close(); +// +// taskCallbackService.sendAck(1, ackCommand.convert2Command()); +// try { +// Thread.sleep(5000); +// } catch (InterruptedException e) { +// e.printStackTrace(); +// } +// +// Stopper.stop(); +// +// try { +// Thread.sleep(5000); +// } catch (InterruptedException e) { +// e.printStackTrace(); +// } +// } - nettyRemotingServer.close(); - - 
taskCallbackService.sendAck(1, ackCommand.convert2Command()); - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - Stopper.stop(); - - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } } diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java index c69ea34c5c..26462d2337 100644 --- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java +++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java @@ -119,6 +119,10 @@ public class ProcessService { logger.info("there is not enough thread for this command: {}", command); return setWaitingThreadProcess(command, processInstance); } + if (processInstance.getCommandType().equals(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS)){ + delCommandByid(command.getId()); + return null; + } processInstance.setCommandType(command.getCommandType()); processInstance.addHistoryCmd(command.getCommandType()); saveProcessInstance(processInstance); diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue index 9d4189406c..3f85f36992 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue @@ -48,7 +48,7 @@
{{$t('Main jar package')}}
- +
{{ node.raw.fullName }}
@@ -557,4 +557,12 @@ } } } + .vue-treeselect--disabled { + .vue-treeselect__control { + background-color: #ecf3f8; + .vue-treeselect__single-value { + color: #6d859e; + } + } + } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue index a5c23d45c7..8fb2ebadfe 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/mr.vue @@ -44,7 +44,7 @@
{{$t('Main jar package')}}
- +
{{ node.raw.fullName }}
@@ -427,4 +427,12 @@ } } } + .vue-treeselect--disabled { + .vue-treeselect__control { + background-color: #ecf3f8; + .vue-treeselect__single-value { + color: #6d859e; + } + } + } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/python.vue b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/python.vue index b9ed72c1ea..851f8bee8a 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/python.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/python.vue @@ -333,3 +333,13 @@ components: { mLocalParams, mListBox, mResources,Treeselect } } + \ No newline at end of file diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/shell.vue b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/shell.vue index d4fa70e63f..7a462a1f27 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/shell.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/shell.vue @@ -396,5 +396,12 @@ right: -12px; top: -16px; } - + .vue-treeselect--disabled { + .vue-treeselect__control { + background-color: #ecf3f8; + .vue-treeselect__single-value { + color: #6d859e; + } + } + } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/spark.vue b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/spark.vue index 6e94374101..4cceb3b27b 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/spark.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/spark.vue @@ -63,7 +63,7 @@
{{$t('Main jar package')}}
- +
{{ node.raw.fullName }}
@@ -606,4 +606,12 @@ } } } + .vue-treeselect--disabled { + .vue-treeselect__control { + background-color: #ecf3f8; + .vue-treeselect__single-value { + color: #6d859e; + } + } + } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sql.vue b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sql.vue index 843c1eaf1f..9e7f3305e5 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sql.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sql.vue @@ -37,7 +37,7 @@
- {{$t('Table')}} + {{$t('TableMode')}} {{$t('Attachment')}}
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/instanceDetails.vue b/dolphinscheduler-ui/src/js/conf/home/pages/dag/instanceDetails.vue index 22acfba408..daa30d7c44 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/instanceDetails.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/instanceDetails.vue @@ -43,7 +43,7 @@ props: {}, methods: { ...mapMutations('dag', ['setIsDetails', 'resetParams']), - ...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getInstancedetail']), + ...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getInstancedetail','getResourcesListJar']), ...mapActions('security', ['getTenantList','getWorkerGroupsAll']), /** * init @@ -62,6 +62,8 @@ this.getProjectList(), // get resources this.getResourcesList(), + // get jar + this.getResourcesListJar(), // get worker group list this.getWorkerGroupsAll(), this.getTenantList() diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/list.vue b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/list.vue index 53939f3f7b..95bdc2930c 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/list.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/list.vue @@ -46,7 +46,7 @@ {{$t('Timing state')}} - + {{$t('Operation')}} @@ -90,6 +90,7 @@ + v.code === code)[0].desc }, @@ -306,6 +307,21 @@ releaseState: 1 }) }, + /** + * copy + */ + _copyProcess (item) { + this.copyProcess({ + processId: item.id + }).then(res => { + this.$message.success(res.msg) + $('body').find('.tooltip.fade.top.in').remove() + this._onUpdate() + }).catch(e => { + this.$message.error(e.msg || '') + }) + }, + _export (item) { this.exportDefinition({ processDefinitionId: item.id, diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/start.vue 
b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/start.vue index ff56de53e5..e47d8c757f 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/start.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/start.vue @@ -193,6 +193,7 @@ runMode: 'RUN_MODE_SERIAL', processInstancePriority: 'MEDIUM', workerGroup: 'default' + } }, props: { @@ -277,6 +278,18 @@ this.workflowName = this.item.name this._getReceiver() + let stateWorkerGroupsList = this.store.state.security.workerGroupsListAll || [] + if (stateWorkerGroupsList.length) { + this.workerGroup = stateWorkerGroupsList[0].id + } else { + this.store.dispatch('security/getWorkerGroupsAll').then(res => { + this.$nextTick(() => { + if(res.length>0) { + this.workerGroup = res[0].id + } + }) + }) + } }, mounted () { this._getNotifyGroupList().then(() => { diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/index.vue index 1f28578504..5dd12b4355 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/index.vue @@ -35,7 +35,7 @@ - + @@ -64,7 +64,8 @@ pageNo: 1, searchVal: '', userId: '' - } + }, + isLeft: true } }, mixins: [listUrlParamHandle], @@ -98,6 +99,11 @@ * get data list */ _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getProcessListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -133,6 +139,9 @@ mounted() { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mList, mConditions, mSpin, mListConstruction, mSecondaryMenu, mNoData 
} } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/index.vue index b95d4ed720..891dc2e281 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/index.vue @@ -30,7 +30,7 @@ - + @@ -74,7 +74,8 @@ endDate: '', // Exectuor Name executorName: '' - } + }, + isLeft: true } }, props: {}, @@ -136,6 +137,11 @@ * @desc Prevent functions from being called multiple times */ _debounceGET: _.debounce(function (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this._getProcessInstanceListP(flag) }, 100, { 'leading': false, @@ -183,6 +189,7 @@ beforeDestroy () { // Destruction wheel clearInterval(this.setIntervalP) + sessionStorage.setItem('isLeft',1) }, components: { mList, mInstanceConditions, mSpin, mListConstruction, mSecondaryMenu, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/index.vue index 4cb166647e..594ffad14e 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/index.vue @@ -30,7 +30,7 @@ - + @@ -71,7 +71,8 @@ endDate: '', // Exectuor Name executorName: '' - } + }, + isLeft: true } }, mixins: [listUrlParamHandle], @@ -118,6 +119,11 @@ * @desc Prevent functions from being called multiple times */ _debounceGET: _.debounce(function (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this._getList(flag) }, 100, { 'leading': false, @@ -146,6 +152,7 @@ beforeDestroy () { // Destruction wheel clearInterval(this.setIntervalP) + 
sessionStorage.setItem('isLeft',1) }, components: { mList, mInstanceConditions, mSpin, mListConstruction, mSecondaryMenu, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue index 73ce023ee7..5cf343ddda 100755 --- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue @@ -38,7 +38,7 @@ - + @@ -67,7 +67,8 @@ pageNo: 1, searchVal: '', type: 'FILE' - } + }, + isLeft: true } }, mixins: [listUrlParamHandle], @@ -91,6 +92,11 @@ this.searchParams.pageSize = val }, _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getResourcesListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -125,6 +131,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mListConstruction, mConditions, mList, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/index.vue index 12be6b0bc8..dac5cc0a86 100755 --- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/index.vue @@ -42,7 +42,7 @@ - + @@ -73,6 +73,7 @@ searchVal: '', type: 'FILE' }, + isLeft: true, breadList: [] } }, @@ -97,6 +98,11 @@ this.searchParams.pageSize = val }, _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getResourcesListP(this.searchParams).then(res => { 
if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -159,6 +165,9 @@ this.breadList = dir this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mListConstruction, mConditions, mList, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/index.vue index d6c79bd258..74b789fe93 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/index.vue @@ -36,8 +36,7 @@ - - + @@ -64,7 +63,8 @@ pageSize: 10, pageNo: 1, searchVal: '' - } + }, + isLeft: true } }, mixins: [listUrlParamHandle], @@ -110,6 +110,11 @@ this._debounceGET() }, _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getUdfFuncListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -137,6 +142,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mListConstruction, mConditions, mList, mSpin, mCreateUdf, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/index.vue index b87b17800a..4058f267b7 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/index.vue @@ -37,8 +37,7 @@ - - + @@ -66,7 +65,8 @@ pageNo: 1, searchVal: '', type: 'UDF' - } + }, + isLeft: true } }, mixins: [listUrlParamHandle], @@ -98,6 +98,11 @@ this._debounceGET() }, _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + 
} else { + this.isLeft = true + } this.isLoading = !flag this.getResourcesListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -125,6 +130,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mListConstruction, mConditions, mList, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/subUdfDirectory/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/subUdfDirectory/index.vue index dd39716f9a..ea8c4d838f 100755 --- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/subUdfDirectory/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/subUdfDirectory/index.vue @@ -41,7 +41,7 @@ - + @@ -72,6 +72,7 @@ searchVal: '', type: 'UDF' }, + isLeft: true, breadList: [] } }, @@ -106,6 +107,11 @@ this._debounceGET() }, _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getResourcesListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -160,6 +166,9 @@ this.breadList = dir this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mListConstruction, mConditions, mList, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/queue/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/queue/index.vue index 47ce14abd7..8e559e45a7 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/queue/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/queue/index.vue @@ -38,7 +38,7 @@ - + @@ -66,6 +66,7 @@ pageNo: 1, searchVal: '' }, + isLeft: true, isADMIN: store.state.user.userInfo.userType === 'ADMIN_USER' } }, @@ -116,6 +117,11 @@ }) }, _getList (flag) { + 
if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getQueueListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -143,6 +149,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mList, mListConstruction, mConditions, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/tenement/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/tenement/index.vue index 0c38f0a911..ca180b1718 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/tenement/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/tenement/index.vue @@ -39,7 +39,7 @@ - + @@ -67,6 +67,7 @@ pageNo: 1, searchVal: '' }, + isLeft: true, isADMIN: store.state.user.userInfo.userType === 'ADMIN_USER' } }, @@ -120,6 +121,11 @@ }) }, _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getTenantListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -147,6 +153,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mList, mListConstruction, mConditions, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/index.vue index f8ad40df85..b09b08bcf6 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/index.vue @@ -39,7 +39,7 @@ - + @@ -65,7 +65,8 @@ pageSize: 10, pageNo: 1, searchVal: '' - } + }, + isLeft: true } }, mixins: [listUrlParamHandle], @@ -118,6 +119,11 @@ }) }, _getList (flag) { + 
if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getUsersListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -145,6 +151,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mList, mListConstruction, mConditions, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/warningGroups/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/warningGroups/index.vue index e70ead46ce..ad604ba3a6 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/warningGroups/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/security/pages/warningGroups/index.vue @@ -39,7 +39,7 @@ - + @@ -67,6 +67,7 @@ pageNo: 1, searchVal: '' }, + isLeft: true, isADMIN: store.state.user.userInfo.userType === 'ADMIN_USER' } }, @@ -120,6 +121,11 @@ }) }, _getList (flag) { + if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getAlertgroupP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -147,6 +153,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mList, mListConstruction, mConditions, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/user/pages/token/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/user/pages/token/index.vue index 21d2becd32..3398acca13 100644 --- a/dolphinscheduler-ui/src/js/conf/home/pages/user/pages/token/index.vue +++ b/dolphinscheduler-ui/src/js/conf/home/pages/user/pages/token/index.vue @@ -40,7 +40,7 @@ - + @@ -67,7 +67,8 @@ pageSize: 10, pageNo: 1, searchVal: '' - } + }, + isLeft: true } }, mixins: [listUrlParamHandle], @@ -120,6 +121,11 @@ }) }, _getList (flag) { + 
if(sessionStorage.getItem('isLeft')==0) { + this.isLeft = false + } else { + this.isLeft = true + } this.isLoading = !flag this.getTokenListP(this.searchParams).then(res => { if(this.searchParams.pageNo>1 && res.totalList.length == 0) { @@ -147,6 +153,9 @@ mounted () { this.$modal.destroy() }, + beforeDestroy () { + sessionStorage.setItem('isLeft',1) + }, components: { mSecondaryMenu, mList, mListConstruction, mConditions, mSpin, mNoData } } diff --git a/dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js b/dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js index a63c9edb8c..f282c8e30a 100644 --- a/dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js +++ b/dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js @@ -90,6 +90,7 @@ export default { }) }) }, + /** * Get process definition DAG diagram details */ @@ -127,6 +128,22 @@ export default { }) }) }, + +/** + * Get process definition DAG diagram details + */ + copyProcess ({ state }, payload) { + return new Promise((resolve, reject) => { + io.post(`projects/${state.projectName}/process/copy`, { + processId: payload.processId + }, res => { + resolve(res) + }).catch(e => { + reject(e) + }) + }) + }, + /** * Get the process instance DAG diagram details */ diff --git a/dolphinscheduler-ui/src/js/conf/login/App.vue b/dolphinscheduler-ui/src/js/conf/login/App.vue index a79743a262..8d065920a8 100644 --- a/dolphinscheduler-ui/src/js/conf/login/App.vue +++ b/dolphinscheduler-ui/src/js/conf/login/App.vue @@ -84,6 +84,7 @@ this._gLogin().then(res => { setTimeout(() => { this.spinnerLoading = false + sessionStorage.setItem('isLeft',1); if (res.data.hasOwnProperty("sessionId")) { let sessionId=res.data.sessionId sessionStorage.setItem("sessionId", sessionId) diff --git a/dolphinscheduler-ui/src/js/module/components/nav/m_logo.png b/dolphinscheduler-ui/src/js/module/components/nav/m_logo.png deleted file mode 100644 index d597f2d0bf..0000000000 Binary files 
a/dolphinscheduler-ui/src/js/module/components/nav/m_logo.png and /dev/null differ diff --git a/dolphinscheduler-ui/src/js/module/components/nav/nav.vue b/dolphinscheduler-ui/src/js/module/components/nav/nav.vue index a46ff6fe74..7c82dc660e 100644 --- a/dolphinscheduler-ui/src/js/module/components/nav/nav.vue +++ b/dolphinscheduler-ui/src/js/module/components/nav/nav.vue @@ -387,7 +387,6 @@ .logo-m { width: 36px; height: 36px; - background: url("./m_logo.png"); margin: 0 auto; position: relative; top: 12px; diff --git a/dolphinscheduler-ui/src/js/module/components/secondaryMenu/secondaryMenu.vue b/dolphinscheduler-ui/src/js/module/components/secondaryMenu/secondaryMenu.vue index 102df8f910..5336555c21 100644 --- a/dolphinscheduler-ui/src/js/module/components/secondaryMenu/secondaryMenu.vue +++ b/dolphinscheduler-ui/src/js/module/components/secondaryMenu/secondaryMenu.vue @@ -63,7 +63,8 @@ menuList: menu(this.type), index: 0, id: this.$route.params.id, - isTogHide: false + isTogHide: false, + isLeft: true } }, props: { @@ -82,6 +83,11 @@ }, _toggleMenu () { this.isTogHide = !this.isTogHide + if(this.isTogHide) { + sessionStorage.setItem('isLeft',0) + } else { + sessionStorage.setItem('isLeft',1) + } } }, mounted () { diff --git a/dolphinscheduler-ui/src/js/module/components/spin/spin.vue b/dolphinscheduler-ui/src/js/module/components/spin/spin.vue index 95fc8e924e..7c6a9c3acf 100644 --- a/dolphinscheduler-ui/src/js/module/components/spin/spin.vue +++ b/dolphinscheduler-ui/src/js/module/components/spin/spin.vue @@ -47,7 +47,7 @@ #spin-model { position: fixed; left: 20px; - top: 80px; + top: 60px; background: #fff; z-index: 99; border-radius: 3px; @@ -69,11 +69,11 @@ } &.spin-sp1 { width: calc(100% - 40px); - height: calc(100% - 100px); + height: calc(100% - 60px); } &.spin-sp2 { - width: calc(100% - 240px); - height: calc(100% - 100px); + width: calc(100% - 220px); + height: calc(100% - 60px); left: 220px; } } diff --git 
a/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js b/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js index db19e1fc0e..0e83689149 100755 --- a/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js +++ b/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js @@ -115,6 +115,7 @@ export default { 'Title': 'Title', 'Please enter the title of email': 'Please enter the title of email', 'Table': 'Table', + 'TableMode': 'Table', 'Attachment': 'Attachment', 'SQL Parameter': 'SQL Parameter', 'SQL Statement': 'SQL Statement', @@ -538,7 +539,6 @@ export default { 'Whether directory': 'Whether directory', 'Yes': 'Yes', 'No': 'No', - 'Modify User': 'Modify User', 'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)', 'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)', 'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)', diff --git a/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js b/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js index 80b45c7a11..134ca84a58 100755 --- a/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js +++ b/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js @@ -116,6 +116,7 @@ export default { 'Title': '主题', 'Please enter the title of email': '请输入邮件主题', 'Table': '表名', + 'TableMode': '表格', 'Attachment': '附件', 'SQL Parameter': 'sql参数', 'SQL Statement': 'sql语句', @@ -538,7 +539,6 @@ export default { 'Whether directory' : '是否文件夹', 'Yes': '是', 'No': '否', - 'Modify User': '修改用户', 'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)', 'Please enter Mysql Table(required)': '请输入Mysql表名(必填)', 'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开', diff --git a/dolphinscheduler-ui/src/sass/conf/home/index.scss b/dolphinscheduler-ui/src/sass/conf/home/index.scss index 76b9f6da01..16f588363d 100644 --- a/dolphinscheduler-ui/src/sass/conf/home/index.scss +++ b/dolphinscheduler-ui/src/sass/conf/home/index.scss @@ -24,8 +24,13 @@ body { 
.ans-message-box,.ans-message-wrapper { z-index: 121 !important; } + &::-webkit-scrollbar { + width: 0; + height: 0; + } } + @media screen and (max-width: 960px){ .nav-model { .logo-box, diff --git a/dolphinscheduler-ui/src/view/home/index.html b/dolphinscheduler-ui/src/view/home/index.html index 70f528216c..2a18be212d 100644 --- a/dolphinscheduler-ui/src/view/home/index.html +++ b/dolphinscheduler-ui/src/view/home/index.html @@ -21,7 +21,9 @@ - + + + diff --git a/pom.xml b/pom.xml index 053652fc92..dad1e3696b 100644 --- a/pom.xml +++ b/pom.xml @@ -695,6 +695,7 @@ **/api/enums/testGetEnum.java **/api/enums/StatusTest.java **/api/exceptions/ApiExceptionHandlerTest.java + **/api/exceptions/ServiceExceptionTest.java **/api/interceptor/LoginHandlerInterceptorTest.java **/api/security/PasswordAuthenticatorTest.java **/api/security/SecurityConfigTest.java @@ -763,6 +764,7 @@ **/common/utils/HadoopUtilsTest.java **/common/utils/HttpUtilsTest.java **/common/ConstantsTest.java + **/common/utils/HadoopUtils.java **/dao/mapper/AccessTokenMapperTest.java **/dao/mapper/AlertGroupMapperTest.java **/dao/mapper/CommandMapperTest.java @@ -776,6 +778,8 @@ **/server/log/TaskLogDiscriminatorTest.java **/server/log/TaskLogFilterTest.java **/server/log/WorkerLogFilterTest.java + **/server/master/consumer/TaskPriorityQueueConsumerTest.java + **/server/master/runner/MasterTaskExecThreadTest.java **/server/master/dispatch/executor/NettyExecutorManagerTest.java **/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java **/server/master/dispatch/host/assign/RandomSelectorTest.java diff --git a/script/dolphinscheduler-daemon.sh b/script/dolphinscheduler-daemon.sh index 19669e5ede..b0437a30c2 100644 --- a/script/dolphinscheduler-daemon.sh +++ b/script/dolphinscheduler-daemon.sh @@ -41,7 +41,7 @@ export JAVA_HOME=$JAVA_HOME #export JAVA_HOME=/opt/soft/jdk export HOSTNAME=`hostname` -export DOLPHINSCHEDULER_PID_DIR=/tmp/ +export DOLPHINSCHEDULER_PID_DIR=$DOLPHINSCHEDULER_HOME/pid 
export DOLPHINSCHEDULER_LOG_DIR=$DOLPHINSCHEDULER_HOME/logs export DOLPHINSCHEDULER_CONF_DIR=$DOLPHINSCHEDULER_HOME/conf export DOLPHINSCHEDULER_LIB_JARS=$DOLPHINSCHEDULER_HOME/lib/* @@ -54,7 +54,7 @@ if [ ! -d "$DOLPHINSCHEDULER_LOG_DIR" ]; then fi log=$DOLPHINSCHEDULER_LOG_DIR/dolphinscheduler-$command-$HOSTNAME.out -pid=$DOLPHINSCHEDULER_LOG_DIR/dolphinscheduler-$command.pid +pid=$DOLPHINSCHEDULER_PID_DIR/dolphinscheduler-$command.pid cd $DOLPHINSCHEDULER_HOME diff --git a/script/env/dolphinscheduler_env.sh b/script/env/dolphinscheduler_env.sh index e5b99e2857..066f379875 100644 --- a/script/env/dolphinscheduler_env.sh +++ b/script/env/dolphinscheduler_env.sh @@ -23,4 +23,6 @@ export PYTHON_HOME=/opt/soft/python export JAVA_HOME=/opt/soft/java export HIVE_HOME=/opt/soft/hive export FLINK_HOME=/opt/soft/flink -export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$PATH +export DATAX_HOME=/opt/soft/dataX + +export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME/bin:$PATH