
Merge pull request #6 from apache/dev

update
Simon, 4 years ago, committed by GitHub
commit ded2f33a75
Changed files (changed line count in parentheses):
  1. .github/workflows/ci_e2e.yml (2)
  2. .github/workflows/ci_ut.yml (2)
  3. .gitignore (1)
  4. .mvn/jvm.config (1)
  5. .mvn/wrapper/MavenWrapperDownloader.java (117)
  6. .mvn/wrapper/maven-wrapper.properties (2)
  7. README.md (2)
  8. README_zh_CN.md (2)
  9. docker/postgres/docker-entrypoint-initdb/init.sql (33)
  10. dockerfile/Dockerfile (2)
  11. dockerfile/README.md (24)
  12. dockerfile/README_zh_CN.md (24)
  13. dockerfile/conf/dolphinscheduler/application-api.properties.tpl (8)
  14. dockerfile/conf/dolphinscheduler/application.properties.tpl (115)
  15. dockerfile/conf/dolphinscheduler/common.properties.tpl (76)
  16. dockerfile/conf/dolphinscheduler/conf/worker_logback.xml (79)
  17. dockerfile/conf/dolphinscheduler/datasource.properties.tpl (71)
  18. dockerfile/conf/dolphinscheduler/master.properties.tpl (40)
  19. dockerfile/conf/dolphinscheduler/quartz.properties.tpl (47)
  20. dockerfile/conf/dolphinscheduler/worker.properties.tpl (37)
  21. dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl (29)
  22. dockerfile/startup-init-conf.sh (6)
  23. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java (6)
  24. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/PropertyUtils.java (12)
  25. dolphinscheduler-alert/src/main/resources/alert.properties (18)
  26. dolphinscheduler-alert/src/main/resources/logback-alert.xml (52)
  27. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/ApiApplicationServer.java (8)
  28. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/AccessTokenController.java (129)
  29. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ExecutorController.java (14)
  30. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/LoggerController.java (8)
  31. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessInstanceController.java (8)
  32. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java (20)
  33. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/WorkerGroupController.java (4)
  34. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ProcessMeta.java (13)
  35. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java (3)
  36. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ApiException.java (34)
  37. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ApiExceptionHandler.java (48)
  38. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AccessTokenService.java (3)
  39. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataAnalysisService.java (7)
  40. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java (41)
  41. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java (5)
  42. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java (39)
  43. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java (61)
  44. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java (99)
  45. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java (12)
  46. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java (71)
  47. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java (28)
  48. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/Result.java (53)
  49. dolphinscheduler-api/src/main/resources/logback-api.xml (62)
  50. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/AccessTokenControllerTest.java (17)
  51. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/exceptions/ApiExceptionHandlerTest.java (42)
  52. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataAnalysisServiceTest.java (30)
  53. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java (10)
  54. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/LoggerServiceTest.java (13)
  55. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java (32)
  56. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessInstanceServiceTest.java (14)
  57. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java (2)
  58. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UsersServiceTest.java (30)
  59. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/WorkerGroupServiceTest.java (17)
  60. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/ResultTest.java (48)
  61. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java (225)
  62. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/CommandType.java (9)
  63. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbType.java (10)
  64. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ExecutionStatus.java (9)
  65. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskTimeoutStrategy.java (35)
  66. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/UdfType.java (11)
  67. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java (19)
  68. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/Property.java (3)
  69. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java (4)
  70. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java (2)
  71. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java (2)
  72. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/ThreadPoolExecutors.java (2)
  73. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/ThreadUtils.java (73)
  74. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java (29)
  75. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java (2)
  76. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/FileUtils.java (38)
  77. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java (41)
  78. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java (2)
  79. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/Preconditions.java (20)
  80. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/PropertyUtils.java (32)
  81. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/ResInfo.java (51)
  82. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/dependent/DependentDateUtils.java (51)
  83. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/placeholder/PlaceholderUtils.java (10)
  84. dolphinscheduler-common/src/main/resources/common.properties (80)
  85. dolphinscheduler-common/src/main/resources/logback.xml (169)
  86. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/CommonUtilsTest.java (5)
  87. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java (16)
  88. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/placeholder/TimePlaceholderUtilsTest.java (64)
  89. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java (5)
  90. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/TaskRecordDao.java (34)
  91. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java (1)
  92. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/ConnectionFactory.java (27)
  93. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/DB2ServerDataSource.java (3)
  94. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/DataSourceFactory.java (6)
  95. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java (3)
  96. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/MySQLDataSource.java (3)
  97. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java (16)
  98. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/PostgreDataSource.java (3)
  99. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SQLServerDataSource.java (46)
  100. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java (3)
Some files were not shown because too many files have changed in this diff.

.github/workflows/ci_e2e.yml (2)

@@ -55,7 +55,7 @@ jobs:
run: sh ./dockerfile/hooks/check
- name: Prepare e2e env
run: |
sudo apt-get install -y libxss1 libappindicator1 libindicator7 xvfb unzip
sudo apt-get install -y libxss1 libappindicator1 libindicator7 xvfb unzip libgbm1
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo dpkg -i google-chrome*.deb
sudo apt-get install -f -y

.github/workflows/ci_ut.yml (2)

@@ -83,4 +83,4 @@ jobs:
mkdir -p ${LOG_DIR}
cd ${DOCKER_DIR}
docker-compose logs db > ${LOG_DIR}/db.txt
continue-on-error: true
continue-on-error: true

.gitignore (1)

@@ -147,3 +147,4 @@ dolphinscheduler-ui/dist/js/login/index.291b8e3.js.map
dolphinscheduler-ui/dist/lib/external/
/dolphinscheduler-dao/src/main/resources/dao/data_source.properties
!/zookeeper_data/

.mvn/jvm.config (1)

@@ -0,0 +1 @@
-Xmx1024m -XX:MaxMetaspaceSize=256m

.mvn/wrapper/MavenWrapperDownloader.java (117)

@@ -0,0 +1,117 @@
/*
* Copyright 2007-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.*;
import java.io.*;
import java.nio.channels.*;
import java.util.Properties;
public class MavenWrapperDownloader {
private static final String WRAPPER_VERSION = "0.5.6";
/**
* Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided.
*/
private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/"
+ WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar";
/**
* Path to the maven-wrapper.properties file, which might contain a downloadUrl property to
* use instead of the default one.
*/
private static final String MAVEN_WRAPPER_PROPERTIES_PATH =
".mvn/wrapper/maven-wrapper.properties";
/**
* Path where the maven-wrapper.jar will be saved to.
*/
private static final String MAVEN_WRAPPER_JAR_PATH =
".mvn/wrapper/maven-wrapper.jar";
/**
* Name of the property which should be used to override the default download url for the wrapper.
*/
private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl";
public static void main(String args[]) {
System.out.println("- Downloader started");
File baseDirectory = new File(args[0]);
System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath());
// If the maven-wrapper.properties exists, read it and check if it contains a custom
// wrapperUrl parameter.
File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH);
String url = DEFAULT_DOWNLOAD_URL;
if(mavenWrapperPropertyFile.exists()) {
FileInputStream mavenWrapperPropertyFileInputStream = null;
try {
mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile);
Properties mavenWrapperProperties = new Properties();
mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream);
url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url);
} catch (IOException e) {
System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'");
} finally {
try {
if(mavenWrapperPropertyFileInputStream != null) {
mavenWrapperPropertyFileInputStream.close();
}
} catch (IOException e) {
// Ignore ...
}
}
}
System.out.println("- Downloading from: " + url);
File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
if(!outputFile.getParentFile().exists()) {
if(!outputFile.getParentFile().mkdirs()) {
System.out.println(
"- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
}
}
System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
try {
downloadFileFromURL(url, outputFile);
System.out.println("Done");
System.exit(0);
} catch (Throwable e) {
System.out.println("- Error downloading");
e.printStackTrace();
System.exit(1);
}
}
private static void downloadFileFromURL(String urlString, File destination) throws Exception {
if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) {
String username = System.getenv("MVNW_USERNAME");
char[] password = System.getenv("MVNW_PASSWORD").toCharArray();
Authenticator.setDefault(new Authenticator() {
@Override
protected PasswordAuthentication getPasswordAuthentication() {
return new PasswordAuthentication(username, password);
}
});
}
URL website = new URL(urlString);
ReadableByteChannel rbc;
rbc = Channels.newChannel(website.openStream());
FileOutputStream fos = new FileOutputStream(destination);
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
fos.close();
rbc.close();
}
}

.mvn/wrapper/maven-wrapper.properties (2)

@@ -0,0 +1,2 @@
distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip
wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar

README.md (2)

@@ -80,7 +80,7 @@ Welcome to participate in contributing, please refer to the process of submittin
### How to Build
```bash
mvn clean install -Prelease
./mvnw clean install -Prelease
```
Artifact:

README_zh_CN.md (2)

@@ -77,7 +77,7 @@ DolphinScheduler's work plan: <a href="https://github.com/apache/incubator-d
### How to Build
```bash
mvn clean install -Prelease
./mvnw clean install -Prelease
```
Artifact:

docker/postgres/docker-entrypoint-initdb/init.sql (33)

@@ -191,7 +191,7 @@ CREATE TABLE t_ds_alert (
content text ,
alert_type int DEFAULT NULL ,
alert_status int DEFAULT '0' ,
log text ,
"log" text ,
alertgroup_id int DEFAULT NULL ,
receivers text ,
receivers_cc text ,
@@ -283,18 +283,6 @@ CREATE TABLE t_ds_error_command (
-- Table structure for table t_ds_master_server
--
DROP TABLE IF EXISTS t_ds_master_server;
CREATE TABLE t_ds_master_server (
id int NOT NULL ,
host varchar(45) DEFAULT NULL ,
port int DEFAULT NULL ,
zk_directory varchar(64) DEFAULT NULL ,
res_info varchar(256) DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
last_heartbeat_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
) ;
--
-- Table structure for table t_ds_process_definition
--
@@ -319,6 +307,8 @@ CREATE TABLE t_ds_process_definition (
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
update_time timestamp DEFAULT NULL ,
modify_by varchar(36) DEFAULT '' ,
resource_ids varchar(64),
PRIMARY KEY (id)
) ;
@@ -359,7 +349,7 @@ CREATE TABLE t_ds_process_instance (
history_cmd text ,
dependence_schedule_times text ,
process_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
worker_group varchar(64) ,
timeout int DEFAULT '0' ,
tenant_id int NOT NULL DEFAULT '-1' ,
PRIMARY KEY (id)
@@ -505,9 +495,12 @@ CREATE TABLE t_ds_resources (
size bigint DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
pid int,
full_name varchar(64),
is_directory int,
PRIMARY KEY (id)
) ;
;
--
-- Table structure for table t_ds_schedules
@@ -526,7 +519,7 @@ CREATE TABLE t_ds_schedules (
warning_type int NOT NULL ,
warning_group_id int DEFAULT NULL ,
process_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
worker_group varchar(64),
create_time timestamp NOT NULL ,
update_time timestamp NOT NULL ,
PRIMARY KEY (id)
@@ -572,7 +565,8 @@ CREATE TABLE t_ds_task_instance (
retry_interval int DEFAULT NULL ,
max_retry_times int DEFAULT NULL ,
task_instance_priority int DEFAULT NULL ,
worker_group_id int DEFAULT '-1' ,
worker_group varchar(64),
executor_id int DEFAULT NULL ,
PRIMARY KEY (id)
) ;
@@ -691,9 +685,6 @@ ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_se
DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence;
CREATE SEQUENCE t_ds_datasource_id_sequence;
ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_master_server_id_sequence;
CREATE SEQUENCE t_ds_master_server_id_sequence;
ALTER TABLE t_ds_master_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_master_server_id_sequence');
DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence;
CREATE SEQUENCE t_ds_process_definition_id_sequence;
ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence');
@@ -768,4 +759,4 @@ INSERT INTO t_ds_relation_user_alertgroup(alertgroup_id,user_id,create_time,upda
INSERT INTO t_ds_queue(queue_name,queue,create_time,update_time) VALUES ('default', 'default','2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- Records of t_ds_queue,default queue name : default
INSERT INTO t_ds_version(version) VALUES ('1.2.0');
INSERT INTO t_ds_version(version) VALUES ('2.0.0');

dockerfile/Dockerfile (2)

@@ -90,6 +90,6 @@ RUN chmod +x /root/checkpoint.sh && \
RUN rm -rf /var/cache/apk/*
#9. expose port
EXPOSE 2181 2888 3888 5432 12345 50051 8888
EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]

dockerfile/README.md (24)

@@ -162,18 +162,6 @@ This environment variable sets the runtime environment for task. The default val
User data directory path, self configuration, please make sure the directory exists and has read/write permissions. The default value is `/tmp/dolphinscheduler`
**`DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH`**
Directory path for user data download, self configuration, please make sure the directory exists and has read/write permissions. The default value is `/tmp/dolphinscheduler/download`
**`DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH`**
Process execute directory, self configuration, please make sure the directory exists and has read/write permissions. The default value is `/tmp/dolphinscheduler/exec`
**`TASK_QUEUE`**
This environment variable sets the task queue for `master-server` and `worker-server`. The default value is `zookeeper`.
**`ZOOKEEPER_QUORUM`**
This environment variable sets the zookeeper quorum for `master-server` and `worker-server`. The default value is `127.0.0.1:2181`.
@@ -208,6 +196,10 @@ This environment variable sets max cpu load avg for `master-server`. The default
This environment variable sets reserved memory for `master-server`. The default value is `0.1`.
**`MASTER_LISTEN_PORT`**
This environment variable sets the port for `master-server`. The default value is `5678`.
**`WORKER_EXEC_THREADS`**
This environment variable sets the exec thread num for `worker-server`. The default value is `100`.
@@ -228,6 +220,14 @@ This environment variable sets max cpu load avg for `worker-server`. The default
This environment variable sets reserved memory for `worker-server`. The default value is `0.1`.
**`WORKER_LISTEN_PORT`**
This environment variable sets the port for `worker-server`. The default value is `1234`.
**`WORKER_GROUP`**
This environment variable sets the group for `worker-server`. The default value is `default`.
**`XLS_FILE_PATH`**
This environment variable sets the xls file path for `alert-server`. The default value is `/tmp/xls`.

dockerfile/README_zh_CN.md (24)

@@ -162,18 +162,6 @@ The Dolphin Scheduler image uses several environment variables that are easy to miss. Although these
User data directory, configured by the user; please make sure the directory exists and the user has read/write permissions. Default value `/tmp/dolphinscheduler`
**`DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH`**
User data download directory, configured by the user; please make sure the directory exists and the user has read/write permissions. Default value `/tmp/dolphinscheduler/download`
**`DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH`**
Task execution directory, configured by the user; please make sure the directory exists and the user has read/write permissions. Default value `/tmp/dolphinscheduler/exec`
**`TASK_QUEUE`**
Sets the `Zookeeper` task queue name for `master-server` and `worker-server`. Default value `zookeeper`
**`ZOOKEEPER_QUORUM`**
Sets the `Zookeeper` address for `master-server` and `worker-server`. Default value `127.0.0.1:2181`
@@ -208,6 +196,10 @@ The Dolphin Scheduler image uses several environment variables that are easy to miss. Although these
Sets the reserved memory of `master-server`. Default value `0.1`
**`MASTER_LISTEN_PORT`**
Sets the port of `master-server`. Default value `5678`
**`WORKER_EXEC_THREADS`**
Sets the number of execution threads in `worker-server`. Default value `100`
@@ -228,6 +220,14 @@ The Dolphin Scheduler image uses several environment variables that are easy to miss. Although these
Sets the reserved memory of `worker-server`. Default value `0.1`
**`WORKER_LISTEN_PORT`**
Sets the port of `worker-server`. Default value `1234`
**`WORKER_GROUP`**
Sets the group of `worker-server`. Default value `default`
**`XLS_FILE_PATH`**
Sets the storage path of the `XLS` file for `alert-server`. Default value `/tmp/xls`

dockerfile/conf/dolphinscheduler/application-api.properties.tpl (8)

@@ -14,21 +14,29 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# server port
server.port=12345
# session config
server.servlet.session.timeout=7200
# servlet config
server.servlet.context-path=/dolphinscheduler/
# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB
# post content
server.jetty.max-http-post-size=5000000
# i18n
spring.messages.encoding=UTF-8
#i18n classpath folder, file prefix messages; if there are many files, use "," as the separator
spring.messages.basename=i18n/messages
# Authentication types (supported types: PASSWORD)
security.authentication.type=PASSWORD

dockerfile/conf/dolphinscheduler/application.properties.tpl (115)

@@ -1,115 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8
# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
spring.datasource.username=${POSTGRESQL_USERNAME}
spring.datasource.password=${POSTGRESQL_PASSWORD}
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
# max wait time to get a connection, in milliseconds. If maxWait is configured, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
# interval, in milliseconds, between checks that close idle connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
# the destroy thread checks connections at this interval and closes the physical connection if the idle time is greater than or equal to minEvictableIdleTimeMillis, in milliseconds
spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
# the SQL used to check whether the connection is valid; it must be a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
# timeout, in seconds, for checking whether the connection is valid
spring.datasource.validationQueryTimeout=3
# when applying for a connection, if the connection has been idle longer than timeBetweenEvictionRunsMillis,
# validationQuery is performed to check whether the connection is valid
spring.datasource.testWhileIdle=true
#execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
spring.datasource.filters=stat,wall,log4j
spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
#mybatis
mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml
mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums
#Entity scan, where multiple packages are separated by a comma or semicolon
mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity
#Primary key type. AUTO: "database ID AUTO", INPUT: "user INPUT ID", ID_WORKER: "global unique ID (numeric type unique ID)", UUID: "global unique ID UUID"
mybatis-plus.global-config.db-config.id-type=AUTO
#Field strategy. IGNORED: "ignore judgment", NOT_NULL: "not NULL judgment", NOT_EMPTY: "not empty judgment"
mybatis-plus.global-config.db-config.field-strategy=NOT_NULL
#Camel case to underscore conversion
mybatis-plus.global-config.db-config.column-underline=true
mybatis-plus.global-config.db-config.logic-delete-value=-1
mybatis-plus.global-config.db-config.logic-not-delete-value=0
mybatis-plus.global-config.db-config.banner=false
#The native MyBatis configuration
mybatis-plus.configuration.map-underscore-to-camel-case=true
mybatis-plus.configuration.cache-enabled=false
mybatis-plus.configuration.call-setters-on-nulls=true
mybatis-plus.configuration.jdbc-type-for-null=null
# master settings
# master execute thread num
master.exec.threads=${MASTER_EXEC_THREADS}
# master execute task number in parallel
master.exec.task.num=${MASTER_EXEC_TASK_NUM}
# master heartbeat interval
master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL}
# master commit task retry times
master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES}
# master commit task interval
master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL}
# the master server can work only when the cpu avg load is less than this value. default value: the number of cpu cores * 2
master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG}
# the master server can work only when the available memory is larger than this reserved memory. default value: physical memory * 1/10, unit is G.
master.reserved.memory=${MASTER_RESERVED_MEMORY}
# worker settings
# worker execute thread num
worker.exec.threads=${WORKER_EXEC_THREADS}
# worker heartbeat interval
worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
# submit the number of tasks at a time
worker.fetch.task.num=${WORKER_FETCH_TASK_NUM}
# the worker server can work only when the cpu avg load is less than this value. default value: the number of cpu cores * 2
worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
# the worker server can work only when the available memory is larger than this reserved memory. default value: physical memory * 1/6, unit is G.
worker.reserved.memory=${WORKER_RESERVED_MEMORY}
# data quality analysis is not currently in use. please ignore the following configuration
# task record
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx

dockerfile/conf/dolphinscheduler/common.properties.tpl (76)

@@ -15,70 +15,64 @@
# limitations under the License.
#
#task queue implementation, default "zookeeper"
dolphinscheduler.queue.impl=${TASK_QUEUE}
#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
zookeeper.quorum=${ZOOKEEPER_QUORUM}
#dolphinscheduler root directory
zookeeper.dolphinscheduler.root=/dolphinscheduler
#dolphinscheduler failover directory
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.base.sleep=100
zookeeper.retry.max.sleep=30000
zookeeper.retry.maxtime=5
#============================================================================
# System
#============================================================================
# system env path. self configuration, please make sure the directory and file exist and have read/write/execute permissions
dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
# is development state? default "false"
development.state=true
# user data directory path, self configuration, please make sure the directory exists and have read write permissions
data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}
# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
data.download.basedir.path=${DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH}
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
process.exec.basepath=${DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH}
# resource upload startup type : HDFS,S3,NONE
res.upload.startup.type=NONE
resource.storage.type=NONE
#============================================================================
# HDFS
#============================================================================
# Users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# data base dir, resource files will be stored in this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
data.store2hdfs.basepath=/dolphinscheduler
# resource store path on HDFS/S3, resource files will be stored in this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
#resource.upload.path=/dolphinscheduler
# whether kerberos starts
hadoop.security.authentication.startup.state=false
#hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
#java.security.krb5.conf.path=/opt/krb5.conf
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
#login.user.keytab.username=hdfs-mycluster@ESZ.COM
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
#login.user.keytab.path=/opt/hdfs.headless.keytab
#resource.view.suffixs
#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
# if resource.storage.type=HDFS, the user needs to have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# kerberos expire time
kerberos.expire.time=7
#============================================================================
# S3
#============================================================================
# ha or single namenode; if namenode ha, you need to copy core-site.xml and hdfs-site.xml
# to the conf directory. s3 is supported, for example: s3a://dolphinscheduler
# if resource.storage.type=S3, the value is like s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to the conf dir
fs.defaultFS=hdfs://mycluster:8020
# s3 need,s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# s3 need,s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# s3 need,s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
#resourcemanager ha note this need ips , this empty if single
# if resource.storage.type=S3,s3 endpoint
#fs.s3a.endpoint=http://192.168.199.91:9010
# if resource.storage.type=S3,s3 access key
#fs.s3a.access.key=A3DXS30FO22544RE
# if resource.storage.type=S3,s3 secret key
#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
# if not using the hadoop resourcemanager, keep the default value; if resourcemanager HA is enabled, enter the HA ips; if the resourcemanager is single, make this value empty TODO
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
# If resourcemanager HA is enabled or no resourcemanager is used, keep the default value; if the resourcemanager is single, you only need to replace ark1 with the actual resourcemanager hostname.
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
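The trailing `%s` in `yarn.application.status.address` is a `String.format`-style placeholder. A small illustrative sketch of how a caller would presumably expand it (only the URL template comes from the file above; the application id is made up):

```java
public class YarnStatusUrlSketch {
    public static void main(String[] args) {
        // Template from common.properties.tpl; %s stands for the YARN application id.
        String template = "http://ark1:8088/ws/v1/cluster/apps/%s";
        String applicationId = "application_1585000000000_0001"; // hypothetical id
        String statusUrl = String.format(template, applicationId);
        System.out.println(statusUrl);
        // -> http://ark1:8088/ws/v1/cluster/apps/application_1585000000000_0001
    }
}
```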

dockerfile/conf/dolphinscheduler/conf/worker_logback.xml (79)

@@ -0,0 +1,79 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"></filter>
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-worker.log</file>
<filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>
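The `SiftingAppender` above routes task logs into per-task files keyed by `taskAppId`. A minimal sketch of how a worker thread could tag its log lines, assuming slf4j/logback on the classpath and assuming `TaskLogDiscriminator` resolves `taskAppId` from the SLF4J MDC the way logback's stock `MDCBasedDiscriminator` does (an assumption; that class is DolphinScheduler-specific and its body is not shown in this diff):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class TaskLogSketch {
    private static final Logger logger = LoggerFactory.getLogger(TaskLogSketch.class);

    public static void main(String[] args) {
        // Hypothetical task id; if the discriminator reads the MDC, these lines
        // end up in logs/1_100_200.log via the FILE-${taskAppId} appender.
        MDC.put("taskAppId", "1_100_200");
        try {
            logger.info("task started");
        } finally {
            MDC.remove("taskAppId"); // avoid leaking the key to reused threads
        }
    }
}
```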

dockerfile/conf/dolphinscheduler/datasource.properties.tpl (71)

@@ -0,0 +1,71 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8
spring.datasource.username=${POSTGRESQL_USERNAME}
spring.datasource.password=${POSTGRESQL_PASSWORD}
## base spring data source configuration todo need to remove
#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# connection configuration
#spring.datasource.initialSize=5
# min connection number
#spring.datasource.minIdle=5
# max connection number
#spring.datasource.maxActive=50
# max wait time to get a connection, in milliseconds. If maxWait is configured, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
#spring.datasource.maxWait=60000
# interval, in milliseconds, between checks that close idle connections
#spring.datasource.timeBetweenEvictionRunsMillis=60000
# the destroy thread checks connections at this interval and closes the physical connection if the idle time is greater than or equal to minEvictableIdleTimeMillis, in milliseconds
#spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
#spring.datasource.minEvictableIdleTimeMillis=300000
# the SQL used to check whether the connection is valid; it must be a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
#spring.datasource.validationQuery=SELECT 1
# timeout, in seconds, for checking whether the connection is valid
#spring.datasource.validationQueryTimeout=3
# when applying for a connection, if the connection has been idle longer than timeBetweenEvictionRunsMillis,
# validationQuery is performed to check whether the connection is valid
#spring.datasource.testWhileIdle=true
#execute validation to check if the connection is valid when applying for a connection
#spring.datasource.testOnBorrow=true
#execute validation to check if the connection is valid when the connection is returned
#spring.datasource.testOnReturn=false
#spring.datasource.defaultAutoCommit=true
#spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
#spring.datasource.poolPreparedStatements=true
#spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
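The commented-out knobs above map directly onto `com.alibaba.druid.pool.DruidDataSource` setters. A minimal programmatic sketch of the same pool settings, assuming the Druid dependency is on the classpath (the host and credentials are placeholders, not values from this commit):

```java
import com.alibaba.druid.pool.DruidDataSource;

public class DataSourceSketch {
    public static DruidDataSource build() {
        DruidDataSource ds = new DruidDataSource();
        ds.setDriverClassName("org.postgresql.Driver");
        ds.setUrl("jdbc:postgresql://localhost:5432/dolphinscheduler?characterEncoding=utf8");
        ds.setUsername("placeholder");
        ds.setPassword("placeholder");
        ds.setInitialSize(5);        // initial connection number
        ds.setMinIdle(5);            // min connection number
        ds.setMaxActive(50);         // max connection number
        ds.setMaxWait(60000);        // max wait for a connection, ms
        ds.setValidationQuery("SELECT 1");
        ds.setTestWhileIdle(true);   // validate connections found idle
        ds.setTestOnBorrow(true);    // validate on checkout
        ds.setTestOnReturn(false);
        return ds;
    }
}
```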

dockerfile/conf/dolphinscheduler/master.properties.tpl (40)

@@ -0,0 +1,40 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# master execute thread num
master.exec.threads=${MASTER_EXEC_THREADS}
# master execute task number in parallel
master.exec.task.num=${MASTER_EXEC_TASK_NUM}
# master heartbeat interval
master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL}
# master commit task retry times
master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES}
# master commit task interval
master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL}
# the master server can work only when the cpu avg load is less than this value. default value: the number of cpu cores * 2
master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG}
# the master server can work only when the available memory is larger than this reserved memory. default value: physical memory * 1/10, unit is G.
master.reserved.memory=${MASTER_RESERVED_MEMORY}
# master listen port
#master.listen.port=${MASTER_LISTEN_PORT}

dockerfile/conf/dolphinscheduler/quartz.properties.tpl (47)

@@ -19,39 +19,36 @@
# Configure Main Scheduler Properties
#============================================================================
#org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
# postgre
org.quartz.dataSource.myDs.driver = org.postgresql.Driver
org.quartz.dataSource.myDs.URL = jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8
org.quartz.dataSource.myDs.user = ${POSTGRESQL_USERNAME}
org.quartz.dataSource.myDs.password = ${POSTGRESQL_PASSWORD}
org.quartz.scheduler.instanceName = DolphinScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false
#org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
#org.quartz.scheduler.instanceName = DolphinScheduler
#org.quartz.scheduler.instanceId = AUTO
#org.quartz.scheduler.makeSchedulerThreadDaemon = true
#org.quartz.jobStore.useProperties = false
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5
#org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
#org.quartz.threadPool.makeThreadsDaemons = true
#org.quartz.threadPool.threadCount = 25
#org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.acquireTriggersWithinLock=true
org.quartz.jobStore.dataSource = myDs
#org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
#org.quartz.jobStore.tablePrefix = QRTZ_
#org.quartz.jobStore.isClustered = true
#org.quartz.jobStore.misfireThreshold = 60000
#org.quartz.jobStore.clusterCheckinInterval = 5000
#org.quartz.jobStore.acquireTriggersWithinLock=true
#org.quartz.jobStore.dataSource = myDs
#============================================================================
# Configure Datasources
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1
#org.quartz.dataSource.myDs.connectionProvider.class = org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider

dockerfile/conf/dolphinscheduler/worker.properties.tpl (37)

@@ -0,0 +1,37 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# worker execute thread num
worker.exec.threads=${WORKER_EXEC_THREADS}
# worker heartbeat interval
worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
# submit the number of tasks at a time
worker.fetch.task.num=${WORKER_FETCH_TASK_NUM}
# the worker server can work only when the cpu avg load is less than this value. default value: the number of cpu cores * 2
worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
# the worker server can work only when the available memory is larger than this reserved memory. default value: physical memory * 1/6, unit is G.
worker.reserved.memory=${WORKER_RESERVED_MEMORY}
# worker listener port
#worker.listen.port=${WORKER_LISTEN_PORT}
# default worker group
#worker.group=${WORKER_GROUP}

dockerfile/conf/dolphinscheduler/zookeeper.properties.tpl (29)

@@ -0,0 +1,29 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
zookeeper.quorum=${ZOOKEEPER_QUORUM}
# dolphinscheduler root directory
#zookeeper.dolphinscheduler.root=/dolphinscheduler
# dolphinscheduler failover directory
#zookeeper.session.timeout=300
#zookeeper.connection.timeout=300
#zookeeper.retry.base.sleep=100
#zookeeper.retry.max.sleep=30000
#zookeeper.retry.maxtime=5

dockerfile/startup-init-conf.sh (6)

@@ -35,13 +35,10 @@ export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-"dolphinscheduler"}
#============================================================================
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
export DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH:-"/tmp/dolphinscheduler/download"}
export DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH=${DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH:-"/tmp/dolphinscheduler/exec"}
#============================================================================
# Zookeeper
#============================================================================
export TASK_QUEUE=${TASK_QUEUE:-"zookeeper"}
export ZOOKEEPER_QUORUM=${ZOOKEEPER_QUORUM:-"127.0.0.1:2181"}
#============================================================================
@@ -54,6 +51,7 @@ export MASTER_TASK_COMMIT_RETRYTIMES=${MASTER_TASK_COMMIT_RETRYTIMES:-"5"}
export MASTER_TASK_COMMIT_INTERVAL=${MASTER_TASK_COMMIT_INTERVAL:-"1000"}
export MASTER_MAX_CPULOAD_AVG=${MASTER_MAX_CPULOAD_AVG:-"100"}
export MASTER_RESERVED_MEMORY=${MASTER_RESERVED_MEMORY:-"0.1"}
export MASTER_LISTEN_PORT=${MASTER_LISTEN_PORT:-"5678"}
#============================================================================
# Worker Server
@@ -63,6 +61,8 @@ export WORKER_HEARTBEAT_INTERVAL=${WORKER_HEARTBEAT_INTERVAL:-"10"}
export WORKER_FETCH_TASK_NUM=${WORKER_FETCH_TASK_NUM:-"3"}
export WORKER_MAX_CPULOAD_AVG=${WORKER_MAX_CPULOAD_AVG:-"100"}
export WORKER_RESERVED_MEMORY=${WORKER_RESERVED_MEMORY:-"0.1"}
export WORKER_LISTEN_PORT=${WORKER_LISTEN_PORT:-"1234"}
export WORKER_GROUP=${WORKER_GROUP:-"default"}
#============================================================================
# Alert Server
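Every default in this script uses the shell `${VAR:-default}` fallback form. For illustration only, a rough Java analogue of the same read-env-or-default pattern (the variable names come from the script; the helper class itself is hypothetical):

```java
public class EnvDefaultsSketch {
    // Mirrors the shell's ${VAR:-default}: also falls back when set but empty.
    static String envOrDefault(String name, String fallback) {
        String value = System.getenv(name);
        return (value == null || value.isEmpty()) ? fallback : value;
    }

    public static void main(String[] args) {
        String masterListenPort = envOrDefault("MASTER_LISTEN_PORT", "5678");
        String workerGroup = envOrDefault("WORKER_GROUP", "default");
        System.out.println(masterListenPort + " " + workerGroup);
    }
}
```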

dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java (6)

@@ -55,7 +55,7 @@ public class MailUtils {
public static final Boolean MAIL_USE_SSL = PropertyUtils.getBoolean(Constants.MAIL_SMTP_SSL_ENABLE);
public static final String XLS_FILE_PATH = PropertyUtils.getString(Constants.XLS_FILE_PATH);
public static final String xlsFilePath = PropertyUtils.getString(Constants.XLS_FILE_PATH,"/tmp/xls");
public static final String STARTTLS_ENABLE = PropertyUtils.getString(Constants.MAIL_SMTP_STARTTLS_ENABLE);
@@ -261,8 +261,8 @@ public class MailUtils {
// set attach file
MimeBodyPart part2 = new MimeBodyPart();
// make excel file
ExcelUtils.genExcelFile(content,title, XLS_FILE_PATH);
File file = new File(XLS_FILE_PATH + Constants.SINGLE_SLASH + title + Constants.EXCEL_SUFFIX_XLS);
ExcelUtils.genExcelFile(content,title, xlsFilePath);
File file = new File(xlsFilePath + Constants.SINGLE_SLASH + title + Constants.EXCEL_SUFFIX_XLS);
part2.attachFile(file);
part2.setFileName(MimeUtility.encodeText(title + Constants.EXCEL_SUFFIX_XLS,Constants.UTF_8,"B"));
// add components to collection

dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/PropertyUtils.java (12)

@@ -79,6 +79,18 @@ public class PropertyUtils {
return properties.getProperty(key.trim());
}
/**
* get property value
*
* @param key property name
* @param defaultVal default value
* @return property value
*/
public static String getString(String key, String defaultVal) {
String val = properties.getProperty(key.trim());
return val == null ? defaultVal : val;
}
/**
* get property value
*
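This new two-argument `getString` is what lets `xls.file.path` stay commented out in `alert.properties`: `MailUtils` now falls back to `/tmp/xls`. A small usage sketch (the key and default mirror the `MailUtils` change above; the wrapper class is just for the example):

```java
import org.apache.dolphinscheduler.alert.utils.PropertyUtils;

public class XlsPathSketch {
    public static void main(String[] args) {
        // Returns /tmp/xls when xls.file.path is absent from alert.properties.
        String xlsFilePath = PropertyUtils.getString("xls.file.path", "/tmp/xls");
        System.out.println(xlsFilePath);
    }
}
```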

dolphinscheduler-alert/src/main/resources/alert.properties (18)

@@ -35,18 +35,18 @@ mail.smtp.ssl.enable=false
mail.smtp.ssl.trust=xxx.xxx.com
#xls file path, need to create it if it does not exist
xls.file.path=/tmp/xls
#xls.file.path=/tmp/xls
# Enterprise WeChat configuration
enterprise.wechat.enable=false
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
#enterprise.wechat.corp.id=xxxxxxx
#enterprise.wechat.secret=xxxxxxx
#enterprise.wechat.agent.id=xxxxxxx
#enterprise.wechat.users=xxxxxxx
#enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
#enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
#enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
#enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}

dolphinscheduler-alert/src/main/resources/logback-alert.xml (52)

@@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
</root>
</configuration>

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/ApiApplicationServer.java (8)

@@ -21,13 +21,15 @@ import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import springfox.documentation.swagger2.annotations.EnableSwagger2;
@SpringBootApplication
@ServletComponentScan
@ComponentScan({"org.apache.dolphinscheduler.api",
"org.apache.dolphinscheduler.dao",
"org.apache.dolphinscheduler.service"})
@ComponentScan(basePackages = {"org.apache.dolphinscheduler"},
excludeFilters = @ComponentScan.Filter(type = FilterType.REGEX,
pattern = "org.apache.dolphinscheduler.server.*"))
public class ApiApplicationServer extends SpringBootServletInitializer {
public static void main(String[] args) {

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/AccessTokenController.java (129)

@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.api.controller;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.AccessTokenService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
@ -37,13 +38,14 @@ import springfox.documentation.annotations.ApiIgnore;
import java.util.Map;
import static org.apache.dolphinscheduler.api.enums.Status.*;
/**
* access token controller
*/
@Api(tags = "ACCESS_TOKEN_TAG", position = 1)
@RestController
@RequestMapping("/access-token")
public class AccessTokenController extends BaseController{
public class AccessTokenController extends BaseController {
private static final Logger logger = LoggerFactory.getLogger(AccessTokenController.class);
@ -54,140 +56,125 @@ public class AccessTokenController extends BaseController{
/**
* create token
* @param loginUser login user
* @param userId token for user id
*
* @param loginUser login user
* @param userId token for user id
* @param expireTime expire time for the token
* @param token token
* @param token token
* @return create result state code
*/
@ApiIgnore
@PostMapping(value = "/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_ACCESS_TOKEN_ERROR)
public Result createToken(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "userId") int userId,
@RequestParam(value = "expireTime") String expireTime,
@RequestParam(value = "token") String token){
@RequestParam(value = "userId") int userId,
@RequestParam(value = "expireTime") String expireTime,
@RequestParam(value = "token") String token) {
logger.info("login user {}, create token , userId : {} , token expire time : {} , token : {}", loginUser.getUserName(),
userId,expireTime,token);
try {
Map<String, Object> result = accessTokenService.createToken(userId, expireTime, token);
return returnDataList(result);
}catch (Exception e){
logger.error(CREATE_ACCESS_TOKEN_ERROR.getMsg(),e);
return error(CREATE_ACCESS_TOKEN_ERROR.getCode(), CREATE_ACCESS_TOKEN_ERROR.getMsg());
}
userId, expireTime, token);
Map<String, Object> result = accessTokenService.createToken(userId, expireTime, token);
return returnDataList(result);
}
/**
* generate token string
* @param loginUser login user
* @param userId token for user
*
* @param loginUser login user
* @param userId token for user
* @param expireTime expire time
* @return token string
*/
@ApiIgnore
@PostMapping(value = "/generate")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(GENERATE_TOKEN_ERROR)
public Result generateToken(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "userId") int userId,
@RequestParam(value = "expireTime") String expireTime){
logger.info("login user {}, generate token , userId : {} , token expire time : {}",loginUser,userId,expireTime);
try {
Map<String, Object> result = accessTokenService.generateToken(userId, expireTime);
return returnDataList(result);
}catch (Exception e){
logger.error(GENERATE_TOKEN_ERROR.getMsg(),e);
return error(GENERATE_TOKEN_ERROR.getCode(), GENERATE_TOKEN_ERROR.getMsg());
}
@RequestParam(value = "userId") int userId,
@RequestParam(value = "expireTime") String expireTime) {
logger.info("login user {}, generate token , userId : {} , token expire time : {}", loginUser, userId, expireTime);
Map<String, Object> result = accessTokenService.generateToken(userId, expireTime);
return returnDataList(result);
}
/**
* query access token list paging
*
* @param loginUser login user
* @param pageNo page number
* @param pageNo page number
* @param searchVal search value
* @param pageSize page size
* @param pageSize page size
* @return token list of page number and page size
*/
@ApiOperation(value = "queryAccessTokenList", notes= "QUERY_ACCESS_TOKEN_LIST_NOTES")
@ApiOperation(value = "queryAccessTokenList", notes = "QUERY_ACCESS_TOKEN_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType ="String"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType ="Int",example = "20")
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value="/list-paging")
@GetMapping(value = "/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_ACCESSTOKEN_LIST_PAGING_ERROR)
public Result queryAccessTokenList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize){
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize) {
logger.info("login user {}, list access token paging, pageNo: {}, searchVal: {}, pageSize: {}",
loginUser.getUserName(),pageNo,searchVal,pageSize);
try{
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = accessTokenService.queryAccessTokenList(loginUser, searchVal, pageNo, pageSize);
loginUser.getUserName(), pageNo, searchVal, pageSize);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}catch (Exception e){
logger.error(QUERY_ACCESSTOKEN_LIST_PAGING_ERROR.getMsg(),e);
return error(QUERY_ACCESSTOKEN_LIST_PAGING_ERROR.getCode(),QUERY_ACCESSTOKEN_LIST_PAGING_ERROR.getMsg());
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = accessTokenService.queryAccessTokenList(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* delete access token by id
*
* @param loginUser login user
* @param id token id
* @param id token id
* @return delete result code
*/
@ApiIgnore
@PostMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_ACCESS_TOKEN_ERROR)
public Result delAccessTokenById(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int id) {
@RequestParam(value = "id") int id) {
logger.info("login user {}, delete access token, id: {},", loginUser.getUserName(), id);
try {
Map<String, Object> result = accessTokenService.delAccessTokenById(loginUser, id);
return returnDataList(result);
}catch (Exception e){
logger.error(DELETE_ACCESS_TOKEN_ERROR.getMsg(),e);
return error(Status.DELETE_ACCESS_TOKEN_ERROR.getCode(), Status.DELETE_ACCESS_TOKEN_ERROR.getMsg());
}
Map<String, Object> result = accessTokenService.delAccessTokenById(loginUser, id);
return returnDataList(result);
}
/**
* update token
* @param loginUser login user
* @param id token id
* @param userId token for user
*
* @param loginUser login user
* @param id token id
* @param userId token for user
* @param expireTime token expire time
* @param token token string
* @param token token string
* @return update result code
*/
@ApiIgnore
@PostMapping(value = "/update")
@ResponseStatus(HttpStatus.OK)
@ApiException(UPDATE_ACCESS_TOKEN_ERROR)
public Result updateToken(@RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int id,
@RequestParam(value = "userId") int userId,
@RequestParam(value = "expireTime") String expireTime,
@RequestParam(value = "token") String token){
@RequestParam(value = "token") String token) {
logger.info("login user {}, update token , userId : {} , token expire time : {} , token : {}", loginUser.getUserName(),
userId,expireTime,token);
try {
Map<String, Object> result = accessTokenService.updateToken(id,userId, expireTime, token);
return returnDataList(result);
}catch (Exception e){
logger.error(UPDATE_ACCESS_TOKEN_ERROR.getMsg(),e);
return error(UPDATE_ACCESS_TOKEN_ERROR.getCode(), UPDATE_ACCESS_TOKEN_ERROR.getMsg());
}
userId, expireTime, token);
Map<String, Object> result = accessTokenService.updateToken(id, userId, expireTime, token);
return returnDataList(result);
}
}

14
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ExecutorController.java

@ -64,7 +64,7 @@ public class ExecutorController extends BaseController {
* @param receiversCc receivers cc
* @param runMode run mode
* @param processInstancePriority process instance priority
* @param workerGroupId worker group id
* @param workerGroup worker group
* @param timeout timeout
* @return start process result code
*/
@ -82,7 +82,7 @@ public class ExecutorController extends BaseController {
@ApiImplicitParam(name = "receiversCc", value = "RECEIVERS_CC",dataType ="String" ),
@ApiImplicitParam(name = "runMode", value = "RUN_MODE",dataType ="RunMode" ),
@ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", required = true, dataType = "Priority" ),
@ApiImplicitParam(name = "workerGroupId", value = "WORKER_GROUP_ID", dataType = "Int",example = "100"),
@ApiImplicitParam(name = "workerGroup", value = "WORKER_GROUP", dataType = "String",example = "default"),
@ApiImplicitParam(name = "timeout", value = "TIMEOUT", dataType = "Int",example = "100"),
})
@PostMapping(value = "start-process-instance")
@ -101,15 +101,15 @@ public class ExecutorController extends BaseController {
@RequestParam(value = "receiversCc", required = false) String receiversCc,
@RequestParam(value = "runMode", required = false) RunMode runMode,
@RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority,
@RequestParam(value = "workerGroupId", required = false, defaultValue = "-1") int workerGroupId,
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
@RequestParam(value = "timeout", required = false) Integer timeout) {
try {
logger.info("login user {}, start process instance, project name: {}, process definition id: {}, schedule time: {}, "
+ "failure policy: {}, node name: {}, node dep: {}, notify type: {}, "
+ "notify group id: {},receivers:{},receiversCc:{}, run mode: {},process instance priority:{}, workerGroupId: {}, timeout: {}",
+ "notify group id: {},receivers:{},receiversCc:{}, run mode: {},process instance priority:{}, workerGroup: {}, timeout: {}",
loginUser.getUserName(), projectName, processDefinitionId, scheduleTime,
failureStrategy, startNodeList, taskDependType, warningType, warningGroupId,receivers,receiversCc,runMode,processInstancePriority,
workerGroupId, timeout);
failureStrategy, startNodeList, taskDependType, warningType, warningGroupId,receivers,receiversCc,runMode,processInstancePriority,
workerGroup, timeout);
if (timeout == null) {
timeout = Constants.MAX_TASK_TIMEOUT;
@ -117,7 +117,7 @@ public class ExecutorController extends BaseController {
Map<String, Object> result = execService.execProcessInstance(loginUser, projectName, processDefinitionId, scheduleTime, execType, failureStrategy,
startNodeList, taskDependType, warningType,
warningGroupId,receivers,receiversCc, runMode,processInstancePriority, workerGroupId, timeout);
warningGroupId,receivers,receiversCc, runMode,processInstancePriority, workerGroup, timeout);
return returnDataList(result);
} catch (Exception e) {
logger.error(Status.START_PROCESS_INSTANCE_ERROR.getMsg(),e);

8
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/LoggerController.java

@ -60,14 +60,14 @@ public class LoggerController extends BaseController {
*/
@ApiOperation(value = "queryLog", notes= "QUERY_TASK_INSTANCE_LOG_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "taskInstId", value = "TASK_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "taskInstanceId", value = "TASK_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "skipLineNum", value = "SKIP_LINE_NUM", dataType ="Int", example = "100"),
@ApiImplicitParam(name = "limit", value = "LIMIT", dataType ="Int", example = "100")
})
@GetMapping(value = "/detail")
@ResponseStatus(HttpStatus.OK)
public Result queryLog(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "taskInstId") int taskInstanceId,
@RequestParam(value = "taskInstanceId") int taskInstanceId,
@RequestParam(value = "skipLineNum") int skipNum,
@RequestParam(value = "limit") int limit) {
try {
@ -91,12 +91,12 @@ public class LoggerController extends BaseController {
*/
@ApiOperation(value = "downloadTaskLog", notes= "DOWNLOAD_TASK_INSTANCE_LOG_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "taskInstId", value = "TASK_ID",dataType = "Int", example = "100")
@ApiImplicitParam(name = "taskInstanceId", value = "TASK_ID",dataType = "Int", example = "100")
})
@GetMapping(value = "/download-log")
@ResponseBody
public ResponseEntity downloadTaskLog(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "taskInstId") int taskInstanceId) {
@RequestParam(value = "taskInstanceId") int taskInstanceId) {
try {
byte[] logBytes = loggerService.getLogBytes(taskInstanceId);
return ResponseEntity

8
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessInstanceController.java

@ -26,8 +26,6 @@ import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import io.swagger.annotations.*;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -242,8 +240,7 @@ public class ProcessInstanceController extends BaseController{
logger.info("delete process instance by id, login user:{}, project name:{}, process instance id:{}",
loginUser.getUserName(), projectName, processInstanceId);
// task queue
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
Map<String, Object> result = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId,tasksQueue);
Map<String, Object> result = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId);
return returnDataList(result);
}catch (Exception e){
logger.error(DELETE_PROCESS_INSTANCE_BY_ID_ERROR.getMsg(),e);
@ -372,7 +369,6 @@ public class ProcessInstanceController extends BaseController{
logger.info("delete process instance by ids, login user:{}, project name:{}, process instance ids :{}",
loginUser.getUserName(), projectName, processInstanceIds);
// task queue
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
Map<String, Object> result = new HashMap<>(5);
List<String> deleteFailedIdList = new ArrayList<>();
if(StringUtils.isNotEmpty(processInstanceIds)){
@ -381,7 +377,7 @@ public class ProcessInstanceController extends BaseController{
for (String strProcessInstanceId:processInstanceIdArray) {
int processInstanceId = Integer.parseInt(strProcessInstanceId);
try {
Map<String, Object> deleteResult = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId,tasksQueue);
Map<String, Object> deleteResult = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId);
if(!Status.SUCCESS.equals(deleteResult.get(Constants.STATUS))){
deleteFailedIdList.add(strProcessInstanceId);
logger.error((String)deleteResult.get(Constants.MSG));

20
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java

@ -70,13 +70,13 @@ public class SchedulerController extends BaseController {
* @param processInstancePriority process instance priority
* @param receivers receivers
* @param receiversCc receivers cc
* @param workerGroupId worker group id
* @param workerGroup worker group
* @return create result code
*/
@ApiOperation(value = "createSchedule", notes= "CREATE_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
@ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type ="WarningType"),
@ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type ="FailureStrategy"),
@ -96,15 +96,15 @@ public class SchedulerController extends BaseController {
@RequestParam(value = "failureStrategy", required = false, defaultValue = DEFAULT_FAILURE_POLICY) FailureStrategy failureStrategy,
@RequestParam(value = "receivers", required = false) String receivers,
@RequestParam(value = "receiversCc", required = false) String receiversCc,
@RequestParam(value = "workerGroupId", required = false, defaultValue = "-1") int workerGroupId,
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
@RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority) {
logger.info("login user {}, project name: {}, process name: {}, create schedule: {}, warning type: {}, warning group id: {}," +
"failure policy: {},receivers : {},receiversCc : {},processInstancePriority : {}, workGroupId:{}",
loginUser.getUserName(), projectName, processDefinitionId, schedule, warningType, warningGroupId,
failureStrategy, receivers, receiversCc, processInstancePriority, workerGroupId);
failureStrategy, receivers, receiversCc, processInstancePriority, workerGroup);
try {
Map<String, Object> result = schedulerService.insertSchedule(loginUser, projectName, processDefinitionId, schedule,
warningType, warningGroupId, failureStrategy, receivers, receiversCc, processInstancePriority, workerGroupId);
warningType, warningGroupId, failureStrategy, receivers, receiversCc, processInstancePriority, workerGroup);
return returnDataList(result);
} catch (Exception e) {
@ -124,7 +124,7 @@ public class SchedulerController extends BaseController {
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param receivers receivers
* @param workerGroupId worker group id
* @param workerGroup worker group
* @param processInstancePriority process instance priority
* @param receiversCc receivers cc
* @return update result code
@ -132,7 +132,7 @@ public class SchedulerController extends BaseController {
@ApiOperation(value = "updateSchedule", notes= "UPDATE_SCHEDULE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
@ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type ="WarningType"),
@ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"),
@ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type ="FailureStrategy"),
@ -151,16 +151,16 @@ public class SchedulerController extends BaseController {
@RequestParam(value = "failureStrategy", required = false, defaultValue = "END") FailureStrategy failureStrategy,
@RequestParam(value = "receivers", required = false) String receivers,
@RequestParam(value = "receiversCc", required = false) String receiversCc,
@RequestParam(value = "workerGroupId", required = false, defaultValue = "-1") int workerGroupId,
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
@RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority) {
logger.info("login user {}, project name: {},id: {}, updateProcessInstance schedule: {}, notify type: {}, notify mails: {}, " +
"failure policy: {},receivers : {},receiversCc : {},processInstancePriority : {},workerGroupId:{}",
loginUser.getUserName(), projectName, id, schedule, warningType, warningGroupId, failureStrategy,
receivers, receiversCc, processInstancePriority, workerGroupId);
receivers, receiversCc, processInstancePriority, workerGroup);
try {
Map<String, Object> result = schedulerService.updateSchedule(loginUser, projectName, id, schedule,
warningType, warningGroupId, failureStrategy, receivers, receiversCc, null, processInstancePriority, workerGroupId);
warningType, warningGroupId, failureStrategy, receivers, receiversCc, null, processInstancePriority, workerGroup);
return returnDataList(result);
} catch (Exception e) {

4
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/WorkerGroupController.java

@ -27,6 +27,7 @@ import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -34,6 +35,7 @@ import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.*;
import springfox.documentation.annotations.ApiIgnore;
import java.util.List;
import java.util.Map;
/**
@ -46,7 +48,6 @@ public class WorkerGroupController extends BaseController{
private static final Logger logger = LoggerFactory.getLogger(WorkerGroupController.class);
@Autowired
WorkerGroupService workerGroupService;
@ -135,6 +136,7 @@ public class WorkerGroupController extends BaseController{
loginUser.getUserName() );
try {
Map<String, Object> result = workerGroupService.queryAllGroup();
return returnDataList(result);
}catch (Exception e){

13
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ProcessMeta.java

@ -96,11 +96,6 @@ public class ProcessMeta {
*/
private String scheduleProcessInstancePriority;
/**
* worker group id
*/
private Integer scheduleWorkerGroupId;
/**
* worker group name
*/
@ -226,14 +221,6 @@ public class ProcessMeta {
this.scheduleProcessInstancePriority = scheduleProcessInstancePriority;
}
public Integer getScheduleWorkerGroupId() {
return scheduleWorkerGroupId;
}
public void setScheduleWorkerGroupId(int scheduleWorkerGroupId) {
this.scheduleWorkerGroupId = scheduleWorkerGroupId;
}
public String getScheduleWorkerGroupName() {
return scheduleWorkerGroupName;
}

3
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java

@ -27,6 +27,8 @@ public enum Status {
SUCCESS(0, "success", "成功"),
INTERNAL_SERVER_ERROR_ARGS(10000, "Internal Server Error: {0}", "服务端异常: {0}"),
REQUEST_PARAMS_NOT_VALID_ERROR(10001, "request parameter {0} is not valid", "请求参数[{0}]无效"),
TASK_TIMEOUT_PARAMS_ERROR(10002, "task timeout parameter is not valid", "任务超时参数无效"),
USER_NAME_EXIST(10003, "user name already exists", "用户名已存在"),
@ -190,6 +192,7 @@ public enum Status {
UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}","udf函数绑定了资源文件[{0}]"),
RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
RESOURCE_NOT_EXIST_OR_NO_PERMISSION(20016, "resource does not exist or no permission, please check the task node and remove the unauthorized or deleted resource","请检查任务节点并移除无权限或者已删除的资源"),
USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),

34
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ApiException.java

@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.exceptions;
import org.apache.dolphinscheduler.api.enums.Status;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
/**
* controller exception annotation
*/
@Retention(RUNTIME)
@Target(METHOD)
public @interface ApiException {
Status value();
}

48
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/exceptions/ApiExceptionHandler.java

@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.exceptions;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.web.bind.annotation.ControllerAdvice;
import org.springframework.web.bind.annotation.ExceptionHandler;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.method.HandlerMethod;
/**
* Exception Handler
*/
@ControllerAdvice
@ResponseBody
public class ApiExceptionHandler {
private static final Logger logger = LoggerFactory.getLogger(ApiExceptionHandler.class);
@ExceptionHandler(Exception.class)
public Result exceptionHandler(Exception e, HandlerMethod hm) {
logger.error(e.getMessage(), e);
ApiException ce = hm.getMethodAnnotation(ApiException.class);
if (ce == null) {
return Result.errorWithArgs(Status.INTERNAL_SERVER_ERROR_ARGS, e.getMessage());
}
Status st = ce.value();
return Result.error(st);
}
}
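
Together, the annotation and this @ControllerAdvice handler replace the per-method try/catch blocks removed from the controllers: a method just declares the Status to report, and any uncaught exception is logged and converted by ApiExceptionHandler. A minimal sketch (DemoController is hypothetical; the Status value is one used by this change set):

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.utils.Result;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class DemoController {

    @GetMapping("/demo")
    @ApiException(Status.CREATE_ACCESS_TOKEN_ERROR)
    public Result demo() {
        // no try/catch needed: ApiExceptionHandler logs this exception and
        // returns Result.error(CREATE_ACCESS_TOKEN_ERROR) to the caller
        throw new IllegalStateException("boom");
    }
}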

3
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/AccessTokenService.java

@ -83,6 +83,9 @@ public class AccessTokenService extends BaseService {
public Map<String, Object> createToken(int userId, String expireTime, String token) {
Map<String, Object> result = new HashMap<>(5);
if (userId <= 0) {
throw new IllegalArgumentException("User id should not be less than or equal to 0.");
}
AccessToken accessToken = new AccessToken();
accessToken.setUserId(userId);
accessToken.setExpireTime(DateUtils.stringToDate(expireTime));

7
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataAnalysisService.java

@ -29,8 +29,6 @@ import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -318,9 +316,8 @@ public class DataAnalysisService extends BaseService{
return result;
}
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
List<String> tasksQueueList = tasksQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_QUEUE);
List<String> tasksKillList = tasksQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_KILL);
List<String> tasksQueueList = new ArrayList<>();
List<String> tasksKillList = new ArrayList<>();
Map<String,Integer> dataMap = new HashMap<>();
if (loginUser.getUserType() == UserType.ADMIN_USER){

41
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java

@ -85,7 +85,7 @@ public class ExecutorService extends BaseService{
* @param receivers receivers
* @param receiversCc receivers cc
* @param processInstancePriority process instance priority
* @param workerGroupId worker group id
* @param workerGroup worker group name
* @param runMode run mode
* @param timeout timeout
* @return execute process instance code
@ -96,7 +96,7 @@ public class ExecutorService extends BaseService{
FailureStrategy failureStrategy, String startNodeList,
TaskDependType taskDependType, WarningType warningType, int warningGroupId,
String receivers, String receiversCc, RunMode runMode,
Priority processInstancePriority, int workerGroupId, Integer timeout) throws ParseException {
Priority processInstancePriority, String workerGroup, Integer timeout) throws ParseException {
Map<String, Object> result = new HashMap<>(5);
// timeout is invalid
if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
@ -128,7 +128,7 @@ public class ExecutorService extends BaseService{
*/
int create = this.createCommand(commandType, processDefinitionId,
taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
warningGroupId, runMode,processInstancePriority, workerGroupId);
warningGroupId, runMode,processInstancePriority, workerGroup);
if(create > 0 ){
/**
* according to the process definition ID updateProcessInstance and CC recipient
@ -435,25 +435,26 @@ public class ExecutorService extends BaseService{
/**
* create command
*
* @param commandType
* @param processDefineId
* @param nodeDep
* @param failureStrategy
* @param startNodeList
* @param schedule
* @param warningType
* @param excutorId
* @param warningGroupId
* @param runMode
* @return
* @param commandType commandType
* @param processDefineId processDefineId
* @param nodeDep nodeDep
* @param failureStrategy failureStrategy
* @param startNodeList startNodeList
* @param schedule schedule
* @param warningType warningType
* @param executorId executorId
* @param warningGroupId warningGroupId
* @param runMode runMode
* @param processInstancePriority processInstancePriority
* @param workerGroup workerGroup
* @return command id
* @throws ParseException
*/
private int createCommand(CommandType commandType, int processDefineId,
TaskDependType nodeDep, FailureStrategy failureStrategy,
String startNodeList, String schedule, WarningType warningType,
int excutorId, int warningGroupId,
RunMode runMode,Priority processInstancePriority, int workerGroupId){
int executorId, int warningGroupId,
RunMode runMode,Priority processInstancePriority, String workerGroup) throws ParseException {
/**
* instantiate command schedule instance
@ -481,10 +482,10 @@ public class ExecutorService extends BaseService{
command.setWarningType(warningType);
}
command.setCommandParam(JSONUtils.toJson(cmdParam));
command.setExecutorId(excutorId);
command.setExecutorId(executorId);
command.setWarningGroupId(warningGroupId);
command.setProcessInstancePriority(processInstancePriority);
command.setWorkerGroupId(workerGroupId);
command.setWorkerGroup(workerGroup);
Date start = null;
Date end = null;
@ -541,7 +542,7 @@ public class ExecutorService extends BaseService{
processDefineId, schedule);
}
}else{
command.setCommandParam(JSONUtils.toJson(cmdParam));
return processService.createCommand(command);
}

5
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/LoggerService.java

@ -21,6 +21,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
@ -68,7 +69,7 @@ public class LoggerService {
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
}
String host = taskInstance.getHost();
String host = Host.of(taskInstance.getHost()).getIp();
if(StringUtils.isEmpty(host)){
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
}
@ -94,7 +95,7 @@ public class LoggerService {
if (taskInstance == null){
throw new RuntimeException("task instance is null");
}
String host = taskInstance.getHost();
String host = Host.of(taskInstance.getHost()).getIp();
return logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath());
}
}
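
Both lookups now run the stored host through Host.of(...).getIp() before the log RPC, which implies task-instance hosts are persisted as "ip:port" rather than a bare ip. A plain-Java stand-in for that parsing (HostAddress is illustrative, not the remote module's Host class):

public final class HostAddress {

    private HostAddress() {
    }

    /** Returns the ip part of an "ip:port" address, or the input unchanged when no port is present. */
    public static String ipOf(String address) {
        int idx = address.indexOf(':');
        return idx < 0 ? address : address.substring(0, idx);
    }

    public static void main(String[] args) {
        System.out.println(ipOf("192.168.1.10:1234")); // 192.168.1.10
        System.out.println(ipOf("192.168.1.10"));      // 192.168.1.10
    }
}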

39
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java

@ -44,6 +44,7 @@ import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.service.permission.PermissionCheck;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -143,6 +144,7 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
processDefine.setModifyBy(loginUser.getUserName());
processDefine.setResourceIds(getResourceIds(processData));
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
@ -333,6 +335,7 @@ public class ProcessDefinitionService extends BaseDAGService {
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
processDefine.setModifyBy(loginUser.getUserName());
processDefine.setResourceIds(getResourceIds(processData));
//custom global params
List<Property> globalParamsList = new ArrayList<>();
@ -476,6 +479,20 @@ public class ProcessDefinitionService extends BaseDAGService {
switch (state) {
case ONLINE:
// To check resources whether they are already cancel authorized or deleted
String resourceIds = processDefinition.getResourceIds();
if (StringUtils.isNotBlank(resourceIds)) {
Integer[] resourceIdArray = Arrays.stream(resourceIds.split(",")).map(Integer::parseInt).toArray(Integer[]::new);
PermissionCheck<Integer> permissionCheck = new PermissionCheck(AuthorizationType.RESOURCE_FILE_ID,processService,resourceIdArray,loginUser.getId(),logger);
try {
permissionCheck.checkPermission();
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, "releaseState");
return result;
}
}
processDefinition.setReleaseState(state);
processDefineMapper.updateById(processDefinition);
break;
@ -580,13 +597,13 @@ public class ProcessDefinitionService extends BaseDAGService {
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty()) {
Schedule schedule = schedules.get(0);
WorkerGroup workerGroup = workerGroupMapper.selectById(schedule.getWorkerGroupId());
/*WorkerGroup workerGroup = workerGroupMapper.selectById(schedule.getWorkerGroupId());
if (null == workerGroup && schedule.getWorkerGroupId() == -1) {
workerGroup = new WorkerGroup();
workerGroup.setId(-1);
workerGroup.setName("");
}
}*/
exportProcessMeta.setScheduleWarningType(schedule.getWarningType().toString());
exportProcessMeta.setScheduleWarningGroupId(schedule.getWarningGroupId());
@ -596,11 +613,7 @@ public class ProcessDefinitionService extends BaseDAGService {
exportProcessMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
exportProcessMeta.setScheduleReleaseState(String.valueOf(ReleaseState.OFFLINE));
exportProcessMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
if (null != workerGroup) {
exportProcessMeta.setScheduleWorkerGroupId(workerGroup.getId());
exportProcessMeta.setScheduleWorkerGroupName(workerGroup.getName());
}
exportProcessMeta.setScheduleWorkerGroupName(schedule.getWorkerGroup());
}
//create workflow json file
return JSONUtils.toJsonString(exportProcessMeta);
@ -799,15 +812,9 @@ public class ProcessDefinitionService extends BaseDAGService {
if (null != processMeta.getScheduleProcessInstancePriority()) {
scheduleObj.setProcessInstancePriority(Priority.valueOf(processMeta.getScheduleProcessInstancePriority()));
}
if (null != processMeta.getScheduleWorkerGroupId()) {
scheduleObj.setWorkerGroupId(processMeta.getScheduleWorkerGroupId());
} else {
if (null != processMeta.getScheduleWorkerGroupName()) {
List<WorkerGroup> workerGroups = workerGroupMapper.queryWorkerGroupByName(processMeta.getScheduleWorkerGroupName());
if(CollectionUtils.isNotEmpty(workerGroups)){
scheduleObj.setWorkerGroupId(workerGroups.get(0).getId());
}
}
if (null != processMeta.getScheduleWorkerGroupName()) {
scheduleObj.setWorkerGroup(processMeta.getScheduleWorkerGroupName());
}
return scheduleMapper.insert(scheduleObj);

61
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java

@ -39,7 +39,6 @@ import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -116,18 +115,7 @@ public class ProcessInstanceService extends BaseDAGService {
return checkResult;
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
String workerGroupName = "";
if(processInstance.getWorkerGroupId() == -1){
workerGroupName = DEFAULT;
}else{
WorkerGroup workerGroup = workerGroupMapper.selectById(processInstance.getWorkerGroupId());
if(workerGroup != null){
workerGroupName = workerGroup.getName();
}else{
workerGroupName = DEFAULT;
}
}
processInstance.setWorkerGroupName(workerGroupName);
ProcessDefinition processDefinition = processService.findProcessDefineById(processInstance.getProcessDefinitionId());
processInstance.setReceivers(processDefinition.getReceivers());
processInstance.setReceiversCc(processDefinition.getReceiversCc());
@ -475,11 +463,10 @@ public class ProcessInstanceService extends BaseDAGService {
* @param loginUser login user
* @param projectName project name
* @param processInstanceId process instance id
* @param tasksQueue task queue
* @return delete result code
*/
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId, ITaskQueue tasksQueue) {
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
@ -497,51 +484,7 @@ public class ProcessInstanceService extends BaseDAGService {
return result;
}
//process instance priority
int processInstancePriority = processInstance.getProcessInstancePriority().ordinal();
// delete zk queue
if (CollectionUtils.isNotEmpty(taskInstanceList)){
for (TaskInstance taskInstance : taskInstanceList){
// task instance priority
int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal();
StringBuilder nodeValueSb = new StringBuilder(100);
nodeValueSb.append(processInstancePriority)
.append(UNDERLINE)
.append(processInstanceId)
.append(UNDERLINE)
.append(taskInstancePriority)
.append(UNDERLINE)
.append(taskInstance.getId())
.append(UNDERLINE);
int taskWorkerGroupId = processService.getTaskWorkerGroupId(taskInstance);
WorkerGroup workerGroup = workerGroupMapper.selectById(taskWorkerGroupId);
if(workerGroup == null){
nodeValueSb.append(DEFAULT_WORKER_ID);
}else {
String ips = workerGroup.getIpList();
StringBuilder ipSb = new StringBuilder(100);
String[] ipArray = ips.split(COMMA);
for (String ip : ipArray) {
long ipLong = IpUtils.ipToLong(ip);
ipSb.append(ipLong).append(COMMA);
}
if(ipSb.length() > 0) {
ipSb.deleteCharAt(ipSb.length() - 1);
}
nodeValueSb.append(ipSb);
}
logger.info("delete task queue node : {}",nodeValueSb);
tasksQueue.removeNode(org.apache.dolphinscheduler.common.Constants.DOLPHINSCHEDULER_TASKS_QUEUE, nodeValueSb.toString());
}
}
// delete database cascade
int delete = processService.deleteWorkProcessInstanceById(processInstanceId);
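
The deleted block removed one zookeeper queue node per task before the database cascade. For reference, this is the node key it assembled, reconstructed as a self-contained sketch (assuming IpUtils.ipToLong is the usual dotted-quad-to-long conversion):

public final class TaskQueueNodeSketch {

    // same conversion the removed loop applied to each worker ip
    static long ipToLong(String ip) {
        long value = 0;
        for (String octet : ip.split("\\.")) {
            value = (value << 8) | Integer.parseInt(octet);
        }
        return value;
    }

    // priority/id fields joined by underscores, then the ip list as comma-separated longs
    static String nodeValue(int processPriority, int processId, int taskPriority, int taskId, String... ips) {
        StringBuilder sb = new StringBuilder(100)
                .append(processPriority).append('_')
                .append(processId).append('_')
                .append(taskPriority).append('_')
                .append(taskId).append('_');
        for (int i = 0; i < ips.length; i++) {
            if (i > 0) {
                sb.append(',');
            }
            sb.append(ipToLong(ips[i]));
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(nodeValue(2, 101, 1, 7, "192.168.1.10")); // 2_101_1_7_3232235786
    }
}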

99
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java

@ -36,6 +36,7 @@ import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -176,6 +177,21 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
@ -416,6 +432,14 @@ public class ResourcesService extends BaseService {
if (isAdmin(loginUser)) {
userId= 0;
}
if (direcotryId != -1) {
Resource directory = resourcesMapper.selectById(direcotryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
@ -505,8 +529,12 @@ public class ResourcesService extends BaseService {
Map<String, Object> result = new HashMap<>(5);
Set<Resource> allResourceList = getAllResources(loginUser, type);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(new ArrayList<>(allResourceList));
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
@ -519,7 +547,7 @@ public class ResourcesService extends BaseService {
* @param loginUser login user
* @return all resource set
*/
private Set<Resource> getAllResources(User loginUser, ResourceType type) {
/*private Set<Resource> getAllResources(User loginUser, ResourceType type) {
int userId = loginUser.getId();
boolean listChildren = true;
if(isAdmin(loginUser)){
@ -540,7 +568,7 @@ public class ResourcesService extends BaseService {
}
}
return allResourceList;
}
}*/
/**
* query resource list
@ -553,7 +581,7 @@ public class ResourcesService extends BaseService {
Map<String, Object> result = new HashMap<>(5);
Set<Resource> allResourceList = getAllResources(loginUser, type);
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), type.ordinal(),0);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
@ -592,15 +620,6 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
//if resource type is UDF, need to check whether it is bound by UDF functions
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new int[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
@ -608,10 +627,22 @@ public class ResourcesService extends BaseService {
}
// get all resource ids of process definitions that are released
Map<Integer, Set<Integer>> resourceProcessMap = getResourceProcessMap();
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
//if resource type is UDF, need to check whether it is bound by UDF functions
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
@ -632,8 +663,8 @@ public class ResourcesService extends BaseService {
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(allChildren.toArray(new Integer[allChildren.size()]));
resourceUserMapper.deleteResourceUser(0, resourceId);
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
@ -1191,38 +1222,4 @@ public class ResourcesService extends BaseService {
}
}
/**
* get resource process map key is resource id,value is the set of process definition
* @return resource process definition map
*/
private Map<Integer,Set<Integer>> getResourceProcessMap(){
Map<Integer, String> map = new HashMap<>();
Map<Integer, Set<Integer>> result = new HashMap<>();
List<Map<String, Object>> list = processDefinitionMapper.listResources();
if (CollectionUtils.isNotEmpty(list)) {
for (Map<String, Object> tempMap : list) {
map.put((Integer) tempMap.get("id"), (String)tempMap.get("resource_ids"));
}
}
for (Map.Entry<Integer, String> entry : map.entrySet()) {
Integer mapKey = entry.getKey();
String[] arr = entry.getValue().split(",");
Set<Integer> mapValues = Arrays.stream(arr).map(Integer::parseInt).collect(Collectors.toSet());
for (Integer value : mapValues) {
if (result.containsKey(value)) {
Set<Integer> set = result.get(value);
set.add(mapKey);
result.put(value, set);
} else {
Set<Integer> set = new HashSet<>();
set.add(mapKey);
result.put(value, set);
}
}
}
return result;
}
}
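
The removed getResourceProcessMap is replaced by ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap, which presumably performs the same inversion: rows of (process definition id, "resource_ids" csv) become a map from resource id to the process definitions using it. A self-contained sketch of that inversion:

import java.util.*;

public final class ResourceMapSketch {

    public static Map<Integer, Set<Integer>> invert(List<Map<String, Object>> rows) {
        Map<Integer, Set<Integer>> result = new HashMap<>();
        for (Map<String, Object> row : rows) {
            Integer processDefinitionId = (Integer) row.get("id");
            String resourceIds = (String) row.get("resource_ids");
            if (resourceIds == null || resourceIds.isEmpty()) {
                continue;
            }
            for (String resourceId : resourceIds.split(",")) {
                result.computeIfAbsent(Integer.parseInt(resourceId.trim()), k -> new HashSet<>())
                        .add(processDefinitionId);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, Object> row = new HashMap<>();
        row.put("id", 7);
        row.put("resource_ids", "1,2");
        System.out.println(invert(Collections.singletonList(row))); // {1=[7], 2=[7]}
    }
}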

12
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java

@ -92,7 +92,7 @@ public class SchedulerService extends BaseService {
* @param processInstancePriority process instance priority
* @param receivers receivers
* @param receiversCc receivers cc
* @param workerGroupId worker group id
* @param workerGroup worker group
* @return create result code
* @throws IOException ioexception
*/
@ -106,7 +106,7 @@ public class SchedulerService extends BaseService {
String receivers,
String receiversCc,
Priority processInstancePriority,
int workerGroupId) throws IOException {
String workerGroup) throws IOException {
Map<String, Object> result = new HashMap<String, Object>(5);
@ -156,7 +156,7 @@ public class SchedulerService extends BaseService {
scheduleObj.setUserName(loginUser.getUserName());
scheduleObj.setReleaseState(ReleaseState.OFFLINE);
scheduleObj.setProcessInstancePriority(processInstancePriority);
scheduleObj.setWorkerGroupId(workerGroupId);
scheduleObj.setWorkerGroup(workerGroup);
scheduleMapper.insert(scheduleObj);
/**
@ -182,7 +182,7 @@ public class SchedulerService extends BaseService {
* @param warningType warning type
* @param warningGroupId warning group id
* @param failureStrategy failure strategy
* @param workerGroupId worker group id
* @param workerGroup worker group
* @param processInstancePriority process instance priority
* @param receiversCc receiver cc
* @param receivers receivers
@ -202,7 +202,7 @@ public class SchedulerService extends BaseService {
String receiversCc,
ReleaseState scheduleStatus,
Priority processInstancePriority,
int workerGroupId) throws IOException {
String workerGroup) throws IOException {
Map<String, Object> result = new HashMap<String, Object>(5);
Project project = projectMapper.queryByName(projectName);
@ -266,7 +266,7 @@ public class SchedulerService extends BaseService {
if (scheduleStatus != null) {
schedule.setReleaseState(scheduleStatus);
}
schedule.setWorkerGroupId(workerGroupId);
schedule.setWorkerGroup(workerGroup);
schedule.setUpdateTime(now);
schedule.setProcessInstancePriority(processInstancePriority);
scheduleMapper.updateById(schedule);

71
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UsersService.java

@ -16,6 +16,8 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
@ -23,15 +25,10 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.EncryptionUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@ -39,6 +36,7 @@ import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.*;
import java.util.stream.Collectors;
/**
* user service
@ -72,6 +70,9 @@ public class UsersService extends BaseService {
@Autowired
private AlertGroupMapper alertGroupMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create user, only system admin have permission
@ -483,23 +484,71 @@ public class UsersService extends BaseService {
return result;
}
String[] resourceFullIdArr = resourceIds.split(",");
// the set of resource ids that need to be authorized
Set<Integer> needAuthorizeResIds = new HashSet<>();
for (String resourceFullId : resourceFullIdArr) {
String[] resourceIdArr = resourceFullId.split("-");
for (int i=0;i<=resourceIdArr.length-1;i++) {
int resourceIdValue = Integer.parseInt(resourceIdArr[i]);
needAuthorizeResIds.add(resourceIdValue);
}
}
//get the authorized resource id list by user id
List<Resource> oldAuthorizedRes = resourceMapper.queryAuthorizedResourceList(userId);
//collect their ids for the diff against the newly authorized set
Set<Integer> oldAuthorizedResIds = oldAuthorizedRes.stream().map(t -> t.getId()).collect(Collectors.toSet());
//get the unauthorized resource id list
oldAuthorizedResIds.removeAll(needAuthorizeResIds);
if (CollectionUtils.isNotEmpty(oldAuthorizedResIds)) {
// get all resource ids of process definitions that are released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
resourceIdSet.retainAll(oldAuthorizedResIds);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
}
resourcesUserMapper.deleteResourceUser(userId, 0);
if (check(result, StringUtils.isEmpty(resourceIds), Status.SUCCESS)) {
return result;
}
String[] resourcesIdArr = resourceIds.split(",");
for (int resourceIdValue : needAuthorizeResIds) {
Resource resource = resourceMapper.selectById(resourceIdValue);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
for (String resourceId : resourcesIdArr) {
Date now = new Date();
ResourcesUser resourcesUser = new ResourcesUser();
resourcesUser.setUserId(userId);
resourcesUser.setResourcesId(Integer.parseInt(resourceId));
resourcesUser.setPerm(7);
resourcesUser.setResourcesId(resourceIdValue);
if (resource.isDirectory()) {
resourcesUser.setPerm(Constants.AUTHORIZE_READABLE_PERM);
}else{
resourcesUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM);
}
resourcesUser.setCreateTime(now);
resourcesUser.setUpdateTime(now);
resourcesUserMapper.insert(resourcesUser);
}
putMsg(result, Status.SUCCESS);
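
The grant logic now works as a set difference: every id on a submitted path such as "1-3-7" (including ancestor directories) is kept, previously authorized ids outside that set are candidates for revocation, and revocation is refused while a released process definition still uses the resource. A self-contained sketch of the id expansion and the difference:

import java.util.*;

public final class GrantDiffSketch {

    // "1-3-7,1-4" -> {1, 3, 7, 4}: each full id lists the resource and its ancestor directories
    public static Set<Integer> expand(String resourceFullIds) {
        Set<Integer> needAuthorize = new HashSet<>();
        for (String fullId : resourceFullIds.split(",")) {
            for (String part : fullId.split("-")) {
                needAuthorize.add(Integer.parseInt(part));
            }
        }
        return needAuthorize;
    }

    public static void main(String[] args) {
        Set<Integer> needed = expand("1-3-7,1-4");
        Set<Integer> previouslyAuthorized = new HashSet<>(Arrays.asList(3, 4, 9));
        previouslyAuthorized.removeAll(needed);
        System.out.println(previouslyAuthorized); // [9] -> the ids to revoke
    }
}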

28
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/WorkerGroupService.java

@ -28,14 +28,12 @@ import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;
/**
* work group service
@ -50,6 +48,9 @@ public class WorkerGroupService extends BaseService {
@Autowired
ProcessInstanceMapper processInstanceMapper;
@Autowired
protected ZookeeperCachedOperator zookeeperCachedOperator;
/**
* create or update a worker group
*
@ -180,9 +181,22 @@ public class WorkerGroupService extends BaseService {
* @return all worker group list
*/
public Map<String,Object> queryAllGroup() {
Map<String, Object> result = new HashMap<>(5);
List<WorkerGroup> workerGroupList = workerGroupMapper.queryAllWorkerGroup();
result.put(Constants.DATA_LIST, workerGroupList);
Map<String, Object> result = new HashMap<>();
String workerPath = zookeeperCachedOperator.getZookeeperConfig().getDsRoot()+"/nodes" +"/worker";
List<String> workerGroupList = zookeeperCachedOperator.getChildrenKeys(workerPath);
// available workerGroup list
List<String> availableWorkerGroupList = new ArrayList<>();
for (String workerGroup : workerGroupList){
String workerGroupPath= workerPath + "/" + workerGroup;
List<String> childrenNodes = zookeeperCachedOperator.getChildrenKeys(workerGroupPath);
if (CollectionUtils.isNotEmpty(childrenNodes)){
availableWorkerGroupList.add(workerGroup);
}
}
result.put(Constants.DATA_LIST, availableWorkerGroupList);
putMsg(result, Status.SUCCESS);
return result;
}
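
Worker groups are no longer rows in a table: they are the child znodes of <dsRoot>/nodes/worker, and a group counts as available only when at least one worker is registered beneath it. A sketch of that discovery with a plain map standing in for the zookeeper client:

import java.util.*;

public final class WorkerGroupDiscoverySketch {

    public static List<String> availableGroups(Map<String, List<String>> children, String dsRoot) {
        String workerPath = dsRoot + "/nodes/worker";
        List<String> available = new ArrayList<>();
        for (String group : children.getOrDefault(workerPath, Collections.emptyList())) {
            // a group with no registered workers is skipped, mirroring the service above
            if (!children.getOrDefault(workerPath + "/" + group, Collections.emptyList()).isEmpty()) {
                available.add(group);
            }
        }
        return available;
    }

    public static void main(String[] args) {
        Map<String, List<String>> zk = new HashMap<>();
        zk.put("/dolphinscheduler/nodes/worker", Arrays.asList("default", "idle"));
        zk.put("/dolphinscheduler/nodes/worker/default", Collections.singletonList("192.168.1.10:1234"));
        System.out.println(availableGroups(zk, "/dolphinscheduler")); // [default]
    }
}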

53
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/utils/Result.java

@ -16,6 +16,10 @@
*/
package org.apache.dolphinscheduler.api.utils;
import org.apache.dolphinscheduler.api.enums.Status;
import java.text.MessageFormat;
/**
* result
*
@ -37,13 +41,58 @@ public class Result<T> {
*/
private T data;
public Result(){}
public Result() {
}
public Result(Integer code , String msg){
public Result(Integer code, String msg) {
this.code = code;
this.msg = msg;
}
private Result(T data) {
this.code = 0;
this.data = data;
}
private Result(Status status) {
if (status != null) {
this.code = status.getCode();
this.msg = status.getMsg();
}
}
/**
* Call this function when the call succeeds
*
* @param data data
* @param <T> type
* @return result
*/
public static <T> Result<T> success(T data) {
return new Result<>(data);
}
/**
* Call this function if there is any error
*
* @param status status
* @return result
*/
public static Result error(Status status) {
return new Result(status);
}
/**
* Call this function if there is any error
*
* @param status status
* @param args args
* @return result
*/
public static Result errorWithArgs(Status status, Object... args) {
return new Result(status.getCode(), MessageFormat.format(status.getMsg(), args));
}
public Integer getCode() {
return code;
}
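A hedged usage sketch of the new factory methods; the Status constants are the same ones exercised by ResultTest later in this diff:
Result<String> ok = Result.success("payload");                                // code 0, data set
Result err = Result.error(Status.ACCESS_TOKEN_NOT_EXIST);                     // code and msg copied from the Status
Result fmt = Result.errorWithArgs(Status.INTERNAL_SERVER_ERROR_ARGS, "boom"); // msg formatted via MessageFormat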

62
dolphinscheduler-api/src/main/resources/logback-api.xml

@ -0,0 +1,62 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config start -->
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-api-server.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config end -->
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
</root>
</configuration>
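In practice this policy rolls the API log every hour (the %d{yyyy-MM-dd_HH} token) or whenever a file hits 64MB (the %i index then increments within the hour), and maxHistory of 168 keeps roughly one week of hourly files, e.g. dolphinscheduler-api-server.2020-01-01_13.0.log.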

17
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/AccessTokenControllerTest.java

@ -56,6 +56,23 @@ public class AccessTokenControllerTest extends AbstractControllerTest{
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testExceptionHandler() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId","-1");
paramsMap.add("expireTime","2019-12-18 00:00:00");
paramsMap.add("token","507f5aeaaa2093dbdff5d5522ce00510");
MvcResult mvcResult = mockMvc.perform(post("/access-token/create")
.header("sessionId", sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.CREATE_ACCESS_TOKEN_ERROR.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testGenerateToken() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();

42
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/exceptions/ApiExceptionHandlerTest.java

@ -0,0 +1,42 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.exceptions;
import org.apache.dolphinscheduler.api.controller.AccessTokenController;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.dao.entity.User;
import org.junit.Assert;
import org.junit.Test;
import org.springframework.web.method.HandlerMethod;
import java.lang.reflect.Method;
import static org.junit.Assert.*;
public class ApiExceptionHandlerTest {
@Test
public void exceptionHandler() throws NoSuchMethodException {
ApiExceptionHandler handler = new ApiExceptionHandler();
AccessTokenController controller = new AccessTokenController();
Method method = controller.getClass().getMethod("createToken", User.class, int.class, String.class, String.class);
HandlerMethod hm = new HandlerMethod(controller, method);
Result result = handler.exceptionHandler(new RuntimeException("test exception"), hm);
Assert.assertEquals(Status.CREATE_ACCESS_TOKEN_ERROR.getCode(),result.getCode().intValue());
}
}

30
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/DataAnalysisServiceTest.java

@ -28,8 +28,6 @@ import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@ -47,7 +45,6 @@ import java.util.List;
import java.util.Map;
@RunWith(PowerMockRunner.class)
@PrepareForTest({TaskQueueFactory.class})
public class DataAnalysisServiceTest {
@InjectMocks
@ -74,8 +71,7 @@ public class DataAnalysisServiceTest {
@Mock
TaskInstanceMapper taskInstanceMapper;
@Mock
ITaskQueue taskQueue;
@Mock
ProcessService processService;
@ -183,30 +179,6 @@ public class DataAnalysisServiceTest {
}
@Test
public void testCountQueueState(){
PowerMockito.mockStatic(TaskQueueFactory.class);
List<String> taskQueueList = new ArrayList<>(1);
taskQueueList.add("1_0_1_1_-1");
List<String> taskKillList = new ArrayList<>(1);
taskKillList.add("1-0");
PowerMockito.when(taskQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_QUEUE)).thenReturn(taskQueueList);
PowerMockito.when(taskQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_KILL)).thenReturn(taskKillList);
PowerMockito.when(TaskQueueFactory.getTaskQueueInstance()).thenReturn(taskQueue);
//checkProject false
Map<String, Object> result = dataAnalysisService.countQueueState(user,2);
Assert.assertTrue(result.isEmpty());
result = dataAnalysisService.countQueueState(user,1);
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
//admin
user.setUserType(UserType.ADMIN_USER);
result = dataAnalysisService.countQueueState(user,1);
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
}
/**
* get list
* @return

10
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ExecutorService2Test.java

@ -117,7 +117,7 @@ public class ExecutorService2Test {
null, null,
null, null, 0,
"", "", RunMode.RUN_MODE_SERIAL,
Priority.LOW, 0, 110);
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(1)).createCommand(any(Command.class));
}catch (Exception e){
@ -138,7 +138,7 @@ public class ExecutorService2Test {
null, null,
null, null, 0,
"", "", RunMode.RUN_MODE_SERIAL,
Priority.LOW, 0, 110);
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
Assert.assertEquals(Status.START_PROCESS_INSTANCE_ERROR, result.get(Constants.STATUS));
verify(processService, times(0)).createCommand(any(Command.class));
}catch (Exception e){
@ -159,7 +159,7 @@ public class ExecutorService2Test {
null, null,
null, null, 0,
"", "", RunMode.RUN_MODE_SERIAL,
Priority.LOW, 0, 110);
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(1)).createCommand(any(Command.class));
}catch (Exception e){
@ -180,7 +180,7 @@ public class ExecutorService2Test {
null, null,
null, null, 0,
"", "", RunMode.RUN_MODE_PARALLEL,
Priority.LOW, 0, 110);
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(31)).createCommand(any(Command.class));
}catch (Exception e){
@ -201,7 +201,7 @@ public class ExecutorService2Test {
null, null,
null, null, 0,
"", "", RunMode.RUN_MODE_PARALLEL,
Priority.LOW, 0, 110);
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
verify(processService, times(15)).createCommand(any(Command.class));
}catch (Exception e){

13
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/LoggerServiceTest.java

@ -52,12 +52,17 @@ public class LoggerServiceTest {
//TASK_INSTANCE_NOT_FOUND
Assert.assertEquals(Status.TASK_INSTANCE_NOT_FOUND.getCode(),result.getCode().intValue());
//HOST NOT FOUND
result = loggerService.queryLog(1,1,1);
try {
//HOST NOT FOUND OR ILLEGAL
result = loggerService.queryLog(1, 1, 1);
} catch (RuntimeException e) {
Assert.assertTrue(true);
logger.error("testQueryDataSourceList error {}", e.getMessage());
}
Assert.assertEquals(Status.TASK_INSTANCE_NOT_FOUND.getCode(),result.getCode().intValue());
//SUCCESS
taskInstance.setHost("127.0.0.1");
taskInstance.setHost("127.0.0.1:8080");
taskInstance.setLogPath("/temp/log");
Mockito.when(processService.findTaskInstanceById(1)).thenReturn(taskInstance);
result = loggerService.queryLog(1,1,1);
@ -87,7 +92,7 @@ public class LoggerServiceTest {
}
//success
taskInstance.setHost("127.0.0.1");
taskInstance.setHost("127.0.0.1:8080");
taskInstance.setLogPath("/temp/log");
//if the object is mocked with @RunWith(PowerMockRunner.class), sonarcloud will not calculate the coverage,
// so no assert is added here

32
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java

@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.dolphinscheduler.api.ApiApplicationServer;
@ -28,7 +29,9 @@ import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.QuartzExecutors;
import org.apache.http.entity.ContentType;
import org.json.JSONException;
import org.junit.Assert;
@ -38,10 +41,12 @@ import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.quartz.Scheduler;
import org.skyscreamer.jsonassert.JSONAssert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.context.ApplicationContext;
import org.springframework.mock.web.MockMultipartFile;
import org.springframework.web.multipart.MultipartFile;
@ -274,6 +279,7 @@ public class ProcessDefinitionServiceTest {
@Test
public void testReleaseProcessDefinition() {
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
@ -298,20 +304,21 @@ public class ProcessDefinitionServiceTest {
46, ReleaseState.ONLINE.getCode());
Assert.assertEquals(Status.SUCCESS, onlineRes.get(Constants.STATUS));
//process definition offline
List<Schedule> schedules = new ArrayList<>();
Schedule schedule = getSchedule();
schedules.add(schedule);
Mockito.when(scheduleMapper.selectAllByProcessDefineArray(new int[]{46})).thenReturn(schedules);
Mockito.when(scheduleMapper.updateById(schedule)).thenReturn(1);
Map<String, Object> offlineRes = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
46, ReleaseState.OFFLINE.getCode());
Assert.assertEquals(Status.SUCCESS, offlineRes.get(Constants.STATUS));
//release error code
Map<String, Object> failRes = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
46, 2);
46, 2);
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, failRes.get(Constants.STATUS));
//FIXME has function exit code 1 when exception
//process definition offline
// List<Schedule> schedules = new ArrayList<>();
// Schedule schedule = getSchedule();
// schedules.add(schedule);
// Mockito.when(scheduleMapper.selectAllByProcessDefineArray(new int[]{46})).thenReturn(schedules);
// Mockito.when(scheduleMapper.updateById(schedule)).thenReturn(1);
// Map<String, Object> offlineRes = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
// 46, ReleaseState.OFFLINE.getCode());
// Assert.assertEquals(Status.SUCCESS, offlineRes.get(Constants.STATUS));
}
@Test
@ -803,7 +810,7 @@ public class ProcessDefinitionServiceTest {
schedule.setProcessInstancePriority(Priority.MEDIUM);
schedule.setWarningType(WarningType.NONE);
schedule.setWarningGroupId(1);
schedule.setWorkerGroupId(-1);
schedule.setWorkerGroup(Constants.DEFAULT_WORKER_GROUP);
return schedule;
}
@ -822,7 +829,6 @@ public class ProcessDefinitionServiceTest {
processMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
processMeta.setScheduleReleaseState(String.valueOf(schedule.getReleaseState()));
processMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
processMeta.setScheduleWorkerGroupId(schedule.getWorkerGroupId());
processMeta.setScheduleWorkerGroupName("workgroup1");
return processMeta;
}

14
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessInstanceServiceTest.java

@ -163,7 +163,6 @@ public class ProcessInstanceServiceTest {
//project auth success
ProcessInstance processInstance = getProcessInstance();
processInstance.setWorkerGroupId(-1);
processInstance.setReceivers("xxx@qq.com");
processInstance.setReceiversCc("xxx@qq.com");
processInstance.setProcessDefinitionId(46);
@ -178,16 +177,11 @@ public class ProcessInstanceServiceTest {
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
//worker group null
processInstance.setWorkerGroupId(1);
when(workerGroupMapper.selectById(processInstance.getWorkerGroupId())).thenReturn(null);
Map<String, Object> workerNullRes = processInstanceService.queryProcessInstanceById(loginUser, projectName, 1);
Assert.assertEquals(Status.SUCCESS, workerNullRes.get(Constants.STATUS));
//worker group exist
WorkerGroup workerGroup = getWorkGroup();
when(workerGroupMapper.selectById(processInstance.getWorkerGroupId())).thenReturn(workerGroup);
processInstance.setWorkerGroupId(1);
when(workerGroupMapper.selectById(processInstance.getWorkerGroupId())).thenReturn(null);
Map<String, Object> workerExistRes = processInstanceService.queryProcessInstanceById(loginUser, projectName, 1);
Assert.assertEquals(Status.SUCCESS, workerExistRes.get(Constants.STATUS));
}
@ -265,7 +259,7 @@ public class ProcessInstanceServiceTest {
//task not sub process
TaskInstance taskInstance = getTaskInstance();
taskInstance.setTaskType(TaskType.HTTP.getDescp());
taskInstance.setTaskType(TaskType.HTTP.toString());
taskInstance.setProcessInstanceId(1);
when(processService.findTaskInstanceById(1)).thenReturn(taskInstance);
Map<String, Object> notSubprocessRes = processInstanceService.querySubProcessInstanceByTaskId(loginUser, projectName, 1);
@ -273,7 +267,7 @@ public class ProcessInstanceServiceTest {
//sub process not exist
TaskInstance subTask = getTaskInstance();
subTask.setTaskType(TaskType.SUB_PROCESS.getDescp());
subTask.setTaskType(TaskType.SUB_PROCESS.toString());
subTask.setProcessInstanceId(1);
when(processService.findTaskInstanceById(subTask.getId())).thenReturn(subTask);
when(processService.findSubProcessInstance(subTask.getProcessInstanceId(), subTask.getId())).thenReturn(null);
@ -394,8 +388,6 @@ public class ProcessInstanceServiceTest {
//project auth fail
when(projectMapper.queryByName(projectName)).thenReturn(null);
when(projectService.checkProjectAndAuth(loginUser, null, projectName)).thenReturn(result);
Map<String, Object> proejctAuthFailRes = processInstanceService.deleteProcessInstanceById(loginUser, projectName, 1, Mockito.any());
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, proejctAuthFailRes.get(Constants.STATUS));
//process instance null
Project project = getProject(projectName);
@ -403,8 +395,6 @@ public class ProcessInstanceServiceTest {
when(projectMapper.queryByName(projectName)).thenReturn(project);
when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
when(processService.findProcessInstanceDetailById(1)).thenReturn(null);
Map<String, Object> processInstanceNullRes = processInstanceService.deleteProcessInstanceById(loginUser, projectName, 1, Mockito.any());
Assert.assertEquals(Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceNullRes.get(Constants.STATUS));
}
@Test

2
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java

@ -242,7 +242,7 @@ public class ResourcesServiceTest {
User loginUser = new User();
loginUser.setId(0);
loginUser.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceListAuthored(0, 0)).thenReturn(getResourceList());
Mockito.when(resourcesMapper.queryResourceListAuthored(0, 0,0)).thenReturn(getResourceList());
Map<String, Object> result = resourcesService.queryResourceList(loginUser, ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));

30
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UsersServiceTest.java

@ -18,13 +18,16 @@ package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.avro.generic.GenericData;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.EncryptionUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
@ -68,6 +71,8 @@ public class UsersServiceTest {
private DataSourceUserMapper datasourceUserMapper;
@Mock
private AlertGroupMapper alertGroupMapper;
@Mock
private ResourceMapper resourceMapper;
private String queueName ="UsersServiceTestQueue";
@ -301,9 +306,13 @@ public class UsersServiceTest {
logger.info(result.toString());
Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS));
//success
when(resourceMapper.queryAuthorizedResourceList(1)).thenReturn(new ArrayList<Resource>());
when(resourceMapper.selectById(Mockito.anyInt())).thenReturn(getResource());
result = usersService.grantResources(loginUser, 1, resourceIds);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@ -476,11 +485,30 @@ public class UsersServiceTest {
return user;
}
/**
* get tenant
* @return tenant
*/
private Tenant getTenant(){
Tenant tenant = new Tenant();
tenant.setId(1);
return tenant;
}
/**
* get resource
* @return resource
*/
private Resource getResource(){
Resource resource = new Resource();
resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
}

17
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/WorkerGroupServiceTest.java

@ -27,12 +27,15 @@ import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.internal.matchers.Any;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -52,11 +55,11 @@ public class WorkerGroupServiceTest {
private WorkerGroupMapper workerGroupMapper;
@Mock
private ProcessInstanceMapper processInstanceMapper;
@Mock
private ZookeeperCachedOperator zookeeperCachedOperator;
private String groupName="groupName000001";
/**
* create or update a worker group
*/
@ -129,8 +132,14 @@ public class WorkerGroupServiceTest {
}
@Test
public void testQueryAllGroup(){
Mockito.when(workerGroupMapper.queryAllWorkerGroup()).thenReturn(getList());
public void testQueryAllGroup() throws Exception {
ZookeeperConfig zookeeperConfig = new ZookeeperConfig();
zookeeperConfig.setDsRoot("/ds");
Mockito.when(zookeeperCachedOperator.getZookeeperConfig()).thenReturn(zookeeperConfig);
List<String> workerGroupStrList = new ArrayList<>();
workerGroupStrList.add("workerGroup1");
Mockito.when(zookeeperCachedOperator.getChildrenKeys(Mockito.anyString())).thenReturn(workerGroupStrList);
Map<String, Object> result = workerGroupService.queryAllGroup();
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),(String)result.get(Constants.MSG));

48
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/ResultTest.java

@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.utils;
import org.apache.dolphinscheduler.api.enums.Status;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import static org.junit.Assert.*;
public class ResultTest {
@Test
public void success() {
HashMap<String, String> map = new HashMap<>();
map.put("testdata", "test");
Result ret = Result.success(map);
Assert.assertEquals(Status.SUCCESS.getCode(), ret.getCode().intValue());
}
@Test
public void error() {
Result ret = Result.error(Status.ACCESS_TOKEN_NOT_EXIST);
Assert.assertEquals(Status.ACCESS_TOKEN_NOT_EXIST.getCode(), ret.getCode().intValue());
}
@Test
public void errorWithArgs() {
Result ret = Result.errorWithArgs(Status.INTERNAL_SERVER_ERROR_ARGS, "test internal server error");
Assert.assertEquals(Status.INTERNAL_SERVER_ERROR_ARGS.getCode(), ret.getCode().intValue());
}
}

225
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java

@ -25,9 +25,45 @@ import java.util.regex.Pattern;
* Constants
*/
public final class Constants {
private Constants() {
throw new IllegalStateException("Constants class");
}
/**
* quartz config
*/
public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId";
public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon";
public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties";
public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class";
public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount";
public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons";
public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority";
public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class";
public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix";
public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered";
public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold";
public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval";
public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock";
public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource";
public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class";
/**
* quartz config default value
*/
public static final String QUARTZ_TABLE_PREFIX = "QRTZ_";
public static final String QUARTZ_MISFIRETHRESHOLD = "60000";
public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000";
public static final String QUARTZ_DATASOURCE = "myDs";
public static final String QUARTZ_THREADCOUNT = "25";
public static final String QUARTZ_THREADPRIORITY = "5";
public static final String QUARTZ_INSTANCENAME = "DolphinScheduler";
public static final String QUARTZ_INSTANCEID = "AUTO";
public static final String QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true";
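A hedged sketch of how these key/default pairs would feed Quartz; the project's actual wiring is not part of this diff:
Properties quartzProps = new Properties(); // java.util.Properties
quartzProps.setProperty(Constants.ORG_QUARTZ_SCHEDULER_INSTANCENAME, Constants.QUARTZ_INSTANCENAME); // "DolphinScheduler"
quartzProps.setProperty(Constants.ORG_QUARTZ_JOBSTORE_TABLEPREFIX, Constants.QUARTZ_TABLE_PREFIX);   // "QRTZ_"
quartzProps.setProperty(Constants.ORG_QUARTZ_THREADPOOL_THREADCOUNT, Constants.QUARTZ_THREADCOUNT);  // "25"
// a scheduler built from these, e.g. new org.quartz.impl.StdSchedulerFactory(quartzProps).getScheduler(),
// would then pick the values up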
/**
* common properties path
*/
@ -56,9 +92,11 @@ public final class Constants {
/**
* yarn.resourcemanager.ha.rm.ids
*/
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
public static final String YARN_RESOURCEMANAGER_HA_XX = "xx";
/**
* yarn.application.status.address
@ -72,31 +110,25 @@ public final class Constants {
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs configuration
* data.store2hdfs.basepath
* hdfs/s3 configuration
* resource.upload.path
*/
public static final String DATA_STORE_2_HDFS_BASEPATH = "data.store2hdfs.basepath";
public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";
/**
* data.basedir.path
* data basedir path
*/
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
/**
* data.download.basedir.path
*/
public static final String DATA_DOWNLOAD_BASEDIR_PATH = "data.download.basedir.path";
/**
* process.exec.basepath
*/
public static final String PROCESS_EXEC_BASEPATH = "process.exec.basepath";
/**
* dolphinscheduler.env.path
*/
public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";
/**
* environment properties default path
*/
public static final String ENV_PATH = "env/dolphinscheduler_env.sh";
/**
* python home
@ -108,30 +140,38 @@ public final class Constants {
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties";
/**
* development.state
*/
public static final String DEVELOPMENT_STATE = "development.state";
public static final String DEVELOPMENT_STATE_DEFAULT_VALUE = "true";
/**
* string true
*/
public static final String STRING_TRUE = "true";
/**
* res.upload.startup.type
* string false
*/
public static final String RES_UPLOAD_STARTUP_TYPE = "res.upload.startup.type";
public static final String STRING_FALSE = "false";
/**
* zookeeper quorum
* resource storage type
*/
public static final String ZOOKEEPER_QUORUM = "zookeeper.quorum";
public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";
/**
* MasterServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/masters";
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master";
/**
* WorkerServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/workers";
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";
/**
* all servers directory registered in zookeeper
@ -143,10 +183,6 @@ public final class Constants {
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";
/**
* WorkerServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_WORKERS = "/lock/workers";
/**
* MasterServer failover directory registered in zookeeper
@ -163,16 +199,17 @@ public final class Constants {
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";
/**
* need send warn times when master server or worker server failover
*/
public static final int DOLPHINSCHEDULER_WARN_TIMES_FAILOVER = 3;
/**
* comma ,
*/
public static final String COMMA = ",";
/**
* slash /
*/
public static final String SLASH = "/";
/**
* COLON :
*/
@ -198,37 +235,6 @@ public final class Constants {
*/
public static final String EQUAL_SIGN = "=";
/**
* ZOOKEEPER_SESSION_TIMEOUT
*/
public static final String ZOOKEEPER_SESSION_TIMEOUT = "zookeeper.session.timeout";
public static final String ZOOKEEPER_CONNECTION_TIMEOUT = "zookeeper.connection.timeout";
public static final String ZOOKEEPER_RETRY_SLEEP = "zookeeper.retry.sleep";
public static final String ZOOKEEPER_RETRY_BASE_SLEEP = "zookeeper.retry.base.sleep";
public static final String ZOOKEEPER_RETRY_MAX_SLEEP = "zookeeper.retry.max.sleep";
public static final String ZOOKEEPER_RETRY_MAXTIME = "zookeeper.retry.maxtime";
public static final String MASTER_HEARTBEAT_INTERVAL = "master.heartbeat.interval";
public static final String MASTER_EXEC_THREADS = "master.exec.threads";
public static final String MASTER_EXEC_TASK_THREADS = "master.exec.task.number";
public static final String MASTER_COMMIT_RETRY_TIMES = "master.task.commit.retryTimes";
public static final String MASTER_COMMIT_RETRY_INTERVAL = "master.task.commit.interval";
public static final String WORKER_EXEC_THREADS = "worker.exec.threads";
public static final String WORKER_HEARTBEAT_INTERVAL = "worker.heartbeat.interval";
public static final String WORKER_FETCH_TASK_NUM = "worker.fetch.task.num";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
@ -239,21 +245,6 @@ public final class Constants {
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
/**
* dolphinscheduler tasks queue
*/
public static final String DOLPHINSCHEDULER_TASKS_QUEUE = "tasks_queue";
/**
* dolphinscheduler need kill tasks queue
*/
public static final String DOLPHINSCHEDULER_TASKS_KILL = "tasks_kill";
public static final String ZOOKEEPER_DOLPHINSCHEDULER_ROOT = "zookeeper.dolphinscheduler.root";
public static final String SCHEDULER_QUEUE_IMPL = "dolphinscheduler.queue.impl";
/**
* date format of yyyy-MM-dd HH:mm:ss
*/
@ -345,26 +336,6 @@ public final class Constants {
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* heartbeat threads number
*/
public static final int DEFAUL_WORKER_HEARTBEAT_THREAD_NUM = 1;
/**
* heartbeat interval
*/
public static final int DEFAULT_WORKER_HEARTBEAT_INTERVAL = 60;
/**
* worker fetch task number
*/
public static final int DEFAULT_WORKER_FETCH_TASK_NUM = 1;
/**
* worker execute threads number
*/
public static final int DEFAULT_WORKER_EXEC_THREAD_NUM = 10;
/**
* master cpu load
*/
@ -386,16 +357,6 @@ public final class Constants {
public static final double DEFAULT_WORKER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* master execute threads number
*/
public static final int DEFAULT_MASTER_EXEC_THREAD_NUM = 100;
/**
* default master concurrent task execute num
*/
public static final int DEFAULT_MASTER_TASK_EXEC_NUM = 20;
/**
* default log cache rows num,output when reach the number
@ -403,33 +364,11 @@ public final class Constants {
public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16;
/**
* log flush interval; logs are flushed when the interval is reached
*/
public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000;
/**
* default master heartbeat thread number
*/
public static final int DEFAULT_MASTER_HEARTBEAT_THREAD_NUM = 1;
/**
* default master heartbeat interval
*/
public static final int DEFAULT_MASTER_HEARTBEAT_INTERVAL = 60;
/**
* default master commit retry times
*/
public static final int DEFAULT_MASTER_COMMIT_RETRY_TIMES = 5;
/**
* default master commit retry interval
*/
public static final int DEFAULT_MASTER_COMMIT_RETRY_INTERVAL = 3000;
/**
* time unit secong to minutes
*/
@ -448,9 +387,9 @@ public final class Constants {
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
/**
* task record configuration path
* datasource configuration path
*/
public static final String APPLICATION_PROPERTIES = "application.properties";
public static final String DATASOURCE_PROPERTIES = "/datasource.properties";
public static final String TASK_RECORD_URL = "task.record.datasource.url";
@ -568,7 +507,7 @@ public final class Constants {
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 7;
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 5;
/**
@ -864,7 +803,7 @@ public final class Constants {
*/
public static final String HIVE_CONF = "hiveconf:";
//flink task
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
public static final String FLINK_RUN_MODE = "-m";
public static final String FLINK_YARN_SLOT = "-ys";
@ -899,26 +838,20 @@ public final class Constants {
/**
* data total
* total data count
*/
public static final String COUNT = "count";
/**
* page size
* number of records per page
*/
public static final String PAGE_SIZE = "pageSize";
/**
* current page no
* current page number
*/
public static final String PAGE_NUMBER = "pageNo";
/**
* result
*/
public static final String RESULT = "result";
/**
*
@ -1001,10 +934,24 @@ public final class Constants {
*/
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
/**
* default worker group
*/
public static final String DEFAULT_WORKER_GROUP = "default";
public static final Integer TASK_INFO_LENGTH = 5;
/**
* new
* schedule time
*/
public static final String PARAMETER_SHECDULE_TIME = "schedule.time";
/**
* authorize writable perm
*/
public static final int AUTHORIZE_WRITABLE_PERM=7;
/**
* authorize readable perm
*/
public static final int AUTHORIZE_READABLE_PERM=4;
}
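The two perm values appear to follow Unix permission bits (7 = read+write+execute, 4 = read-only), which lines up with grantResources earlier in this diff handing 4 to directories and 7 to plain files.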

9
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/CommandType.java

@ -65,4 +65,13 @@ public enum CommandType {
public String getDescp() {
return descp;
}
public static CommandType of(Integer status){
for(CommandType cmdType : values()){
if(cmdType.getCode() == status){
return cmdType;
}
}
throw new IllegalArgumentException("invalid status : " + status);
}
}
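A hedged usage sketch of the of() lookup; the same pattern is added to DbType, ExecutionStatus, TaskTimeoutStrategy and UdfType below, and the concrete code-to-constant mapping shown here is an assumption:
CommandType type = CommandType.of(0); // e.g. 0 -> whichever constant is declared with code 0
// CommandType.of(999) throws IllegalArgumentException("invalid status : 999")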

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbType.java

@ -57,4 +57,14 @@ public enum DbType {
public String getDescp() {
return descp;
}
public static DbType of(int type){
for(DbType ty : values()){
if(ty.getCode() == type){
return ty;
}
}
throw new IllegalArgumentException("invalid type : " + type);
}
}

9
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ExecutionStatus.java

@ -128,4 +128,13 @@ public enum ExecutionStatus {
public String getDescp() {
return descp;
}
public static ExecutionStatus of(int status){
for(ExecutionStatus es : values()){
if(es.getCode() == status){
return es;
}
}
throw new IllegalArgumentException("invalid status : " + status);
}
}

35
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/TaskTimeoutStrategy.java

@ -16,14 +16,45 @@
*/
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
/**
* task timeout strategy
*/
public enum TaskTimeoutStrategy {
public enum TaskTimeoutStrategy {
/**
* 0 warn
* 1 failed
* 2 warn+failed
*/
WARN, FAILED, WARNFAILED
WARN(0, "warn"),
FAILED(1,"failed"),
WARNFAILED(2,"warnfailed");
TaskTimeoutStrategy(int code, String descp){
this.code = code;
this.descp = descp;
}
@EnumValue
private final int code;
private final String descp;
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
public static TaskTimeoutStrategy of(int status){
for(TaskTimeoutStrategy es : values()){
if(es.getCode() == status){
return es;
}
}
throw new IllegalArgumentException("invalid status : " + status);
}
}
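The @EnumValue annotation is MyBatis-Plus's marker for the field persisted to and read back from the database column, which is why the enum now carries a stable int code plus the of(int) reverse lookup.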

11
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/UdfType.java

@ -44,4 +44,15 @@ public enum UdfType {
public String getDescp() {
return descp;
}
public static UdfType of(int type){
for(UdfType ut : values()){
if(ut.getCode() == type){
return ut;
}
}
throw new IllegalArgumentException("invalid type : " + type);
}
}

19
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java

@ -120,9 +120,9 @@ public class TaskNode {
private Priority taskInstancePriority;
/**
* worker group id
* worker group
*/
private int workerGroupId;
private String workerGroup;
/**
@ -236,8 +236,9 @@ public class TaskNode {
Objects.equals(extras, taskNode.extras) &&
Objects.equals(runFlag, taskNode.runFlag) &&
Objects.equals(dependence, taskNode.dependence) &&
Objects.equals(workerGroup, taskNode.workerGroup) &&
Objects.equals(conditionResult, taskNode.conditionResult) &&
Objects.equals(workerGroupId, taskNode.workerGroupId) &&
CollectionUtils.equalLists(depList, taskNode.depList);
}
@ -288,7 +289,7 @@ public class TaskNode {
/**
* get task time out parameter
* @return
* @return task time out parameter
*/
public TaskTimeoutParameter getTaskTimeoutParameter() {
if(StringUtils.isNotEmpty(this.getTimeout())){
@ -321,16 +322,16 @@ public class TaskNode {
", dependence='" + dependence + '\'' +
", taskInstancePriority=" + taskInstancePriority +
", timeout='" + timeout + '\'' +
", workerGroupId='" + workerGroupId + '\'' +
", workerGroup='" + workerGroup + '\'' +
'}';
}
public int getWorkerGroupId() {
return workerGroupId;
public String getWorkerGroup() {
return workerGroup;
}
public void setWorkerGroupId(int workerGroupId) {
this.workerGroupId = workerGroupId;
public void setWorkerGroup(String workerGroup) {
this.workerGroup = workerGroup;
}
public String getConditionResult() {

3
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/Property.java

@ -20,9 +20,10 @@ package org.apache.dolphinscheduler.common.process;
import org.apache.dolphinscheduler.common.enums.DataType;
import org.apache.dolphinscheduler.common.enums.Direct;
import java.io.Serializable;
import java.util.Objects;
public class Property {
public class Property implements Serializable {
/**
* key
*/

4
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java

@ -41,7 +41,7 @@ public abstract class AbstractParameters implements IParameters {
/**
* get local parameters list
* @return
* @return Property list
*/
public List<Property> getLocalParams() {
return localParams;
@ -53,7 +53,7 @@ public abstract class AbstractParameters implements IParameters {
/**
* get local parameters map
* @return
* @return parameters map
*/
public Map<String,Property> getLocalParametersMap() {
if (localParams != null) {

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java

@ -27,7 +27,7 @@ public interface IParameters {
/**
* check parameters is valid
*
* @return
* @return result
*/
boolean checkParameters();

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java

@ -34,6 +34,6 @@ public class Stopper {
}
public static final void stop(){
signal.getAndSet(true);
signal.set(true);
}
}

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/ThreadPoolExecutors.java

@ -71,7 +71,7 @@ public class ThreadPoolExecutors {
* Executes the given task sometime in the future. The task may execute in a new thread or in an existing pooled thread.
* If the task cannot be submitted for execution, either because this executor has been shutdown or because its capacity has been reached,
* the task is handled by the current RejectedExecutionHandler.
* @param event
* @param event event
*/
public void execute(final Runnable event) {
Executor eventExecutor = getExecutor();

73
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/ThreadUtils.java

@ -33,10 +33,11 @@ public class ThreadUtils {
private static final int STACK_DEPTH = 20;
/**
Wrapper over newCachedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* Wrapper over newCachedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
* @param prefix
* @return
*
* @param prefix prefix
* @return ThreadPoolExecutor
*/
public static ThreadPoolExecutor newDaemonCachedThreadPool(String prefix){
ThreadFactory threadFactory = namedThreadFactory(prefix);
@ -45,8 +46,8 @@ public class ThreadUtils {
/**
* Create a thread factory that names threads with a prefix and also sets the threads to daemon.
* @param prefix
* @return
* @param prefix prefix
* @return ThreadFactory
*/
private static ThreadFactory namedThreadFactory(String prefix) {
return new ThreadFactoryBuilder().setDaemon(true).setNameFormat(prefix + "-%d").build();
@ -56,10 +57,10 @@ public class ThreadUtils {
/**
* Create a cached thread pool whose max number of threads is `maxThreadNumber`. Thread names
* are formatted as prefix-ID, where ID is a unique, sequentially assigned integer.
* @param prefix
* @param maxThreadNumber
* @param keepAliveSeconds
* @return
* @param prefix prefix
* @param maxThreadNumber maxThreadNumber
* @param keepAliveSeconds keepAliveSeconds
* @return ThreadPoolExecutor
*/
public static ThreadPoolExecutor newDaemonCachedThreadPool(String prefix ,
int maxThreadNumber,
@ -82,9 +83,9 @@ public class ThreadUtils {
/**
* Wrapper over newFixedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
* @param nThreads
* @param prefix
* @return
* @param nThreads nThreads
* @param prefix prefix
* @return ThreadPoolExecutor
*/
public static ThreadPoolExecutor newDaemonFixedThreadPool(int nThreads , String prefix){
ThreadFactory threadFactory = namedThreadFactory(prefix);
@ -93,8 +94,8 @@ public class ThreadUtils {
/**
* Wrapper over newSingleThreadExecutor.
* @param threadName
* @return
* @param threadName threadName
* @return ExecutorService
*/
public static ExecutorService newDaemonSingleThreadExecutor(String threadName){
ThreadFactory threadFactory = new ThreadFactoryBuilder()
@ -106,23 +107,22 @@ public class ThreadUtils {
/**
* Wrapper over newDaemonFixedThreadExecutor.
* @param threadName
* @param threadsNum
* @return
* @param threadName threadName
* @param threadsNum threadsNum
* @return ExecutorService
*/
public static ExecutorService newDaemonFixedThreadExecutor(String threadName,int threadsNum){
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(threadName)
.build();
return Executors.newFixedThreadPool(threadsNum,threadFactory);
return Executors.newFixedThreadPool(threadsNum, threadFactory);
}
/**
* Wrapper over ScheduledThreadPoolExecutor
* @param threadName
* @param corePoolSize
* @return
* @param threadName threadName
* @param corePoolSize corePoolSize
* @return ScheduledExecutorService
*/
public static ScheduledExecutorService newDaemonThreadScheduledExecutor(String threadName, int corePoolSize) {
return newThreadScheduledExecutor(threadName, corePoolSize, true);
@ -130,10 +130,10 @@ public class ThreadUtils {
/**
* Wrapper over ScheduledThreadPoolExecutor
* @param threadName
* @param corePoolSize
* @param isDaemon
* @return
* @param threadName threadName
* @param corePoolSize corePoolSize
* @param isDaemon isDaemon
* @return ScheduledThreadPoolExecutor
*/
public static ScheduledExecutorService newThreadScheduledExecutor(String threadName, int corePoolSize, boolean isDaemon) {
ThreadFactory threadFactory = new ThreadFactoryBuilder()
@ -147,6 +147,11 @@ public class ThreadUtils {
return executor;
}
/**
* get thread info
* @param t t
* @return thread info
*/
public static ThreadInfo getThreadInfo(Thread t) {
long tid = t.getId();
return threadBean.getThreadInfo(tid, STACK_DEPTH);
@ -155,7 +160,9 @@ public class ThreadUtils {
/**
* Format the given ThreadInfo object as a String.
* @param indent a prefix for each line, used for nested indentation
* @param threadInfo threadInfo
* @param indent indent
* @return threadInfo
*/
public static String formatThreadInfo(ThreadInfo threadInfo, String indent) {
StringBuilder sb = new StringBuilder();
@ -167,9 +174,9 @@ public class ThreadUtils {
/**
* Print all of the thread's information and stack traces.
*
* @param sb
* @param info
* @param indent
* @param sb StringBuilder
* @param info ThreadInfo
* @param indent indent
*/
public static void appendThreadInfo(StringBuilder sb,
ThreadInfo info,
@ -204,6 +211,12 @@ public class ThreadUtils {
}
}
/**
* getTaskName
* @param id id
* @param name name
* @return task name
*/
private static String getTaskName(long id, String name) {
if (name == null) {
return Long.toString(id);

29
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java

@ -20,13 +20,18 @@ import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.net.URL;
/**
* common utils
*/
public class CommonUtils {
public class CommonUtils {
private static final Logger logger = LoggerFactory.getLogger(CommonUtils.class);
private CommonUtils() {
throw new IllegalStateException("CommonUtils class");
}
@ -37,25 +42,25 @@ public class CommonUtils {
public static String getSystemEnvPath() {
String envPath = PropertyUtils.getString(Constants.DOLPHINSCHEDULER_ENV_PATH);
if (StringUtils.isEmpty(envPath)) {
envPath = System.getProperty("user.home") + File.separator + ".bash_profile";
URL envDefaultPath = CommonUtils.class.getClassLoader().getResource(Constants.ENV_PATH);
if (envDefaultPath != null){
envPath = envDefaultPath.getPath();
logger.debug("env path :{}", envPath);
}else{
envPath = System.getProperty("user.home") + File.separator + ".bash_profile";
}
}
return envPath;
}
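Spelled out, the fallback chain is: the dolphinscheduler.env.path property if set, else the bundled classpath resource env/dolphinscheduler_env.sh, else ~/.bash_profile.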
/**
* @return get queue implementation name
*/
public static String getQueueImplValue(){
return PropertyUtils.getString(Constants.SCHEDULER_QUEUE_IMPL);
}
/**
*
* @return is develop mode
*/
public static boolean isDevelopMode() {
return PropertyUtils.getBoolean(Constants.DEVELOPMENT_STATE);
return PropertyUtils.getBoolean(Constants.DEVELOPMENT_STATE, true);
}
@ -65,9 +70,9 @@ public class CommonUtils {
* @return true if upload resource is HDFS and kerberos startup
*/
public static boolean getKerberosStartupState(){
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE);
Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false);
return resUploadType == ResUploadType.HDFS && kerberosStartupState;
}

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/DateUtils.java

File diff suppressed because one or more lines are too long

38
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/FileUtils.java

@ -34,6 +34,8 @@ import static org.apache.dolphinscheduler.common.Constants.*;
public class FileUtils {
public static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
public static final String DATA_BASEDIR = PropertyUtils.getString(DATA_BASEDIR_PATH,"/tmp/dolphinscheduler");
/**
* get file suffix
*
@ -59,7 +61,14 @@ public class FileUtils {
* @return download file name
*/
public static String getDownloadFilename(String filename) {
return String.format("%s/%s/%s", PropertyUtils.getString(DATA_DOWNLOAD_BASEDIR_PATH), DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
String fileName = String.format("%s/download/%s/%s", DATA_BASEDIR, DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
File file = new File(fileName);
if (!file.getParentFile().exists()){
file.getParentFile().mkdirs();
}
return fileName;
}
/**
@ -70,7 +79,13 @@ public class FileUtils {
* @return local file path
*/
public static String getUploadFilename(String tenantCode, String filename) {
return String.format("%s/%s/resources/%s", PropertyUtils.getString(DATA_BASEDIR_PATH), tenantCode, filename);
String fileName = String.format("%s/%s/resources/%s", DATA_BASEDIR, tenantCode, filename);
File file = new File(fileName);
if (!file.getParentFile().exists()){
file.getParentFile().mkdirs();
}
return fileName;
}
/**
@ -82,9 +97,14 @@ public class FileUtils {
* @return directory of process execution
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId, int taskInstanceId) {
return String.format("%s/process/%s/%s/%s/%s", PropertyUtils.getString(PROCESS_EXEC_BASEPATH), Integer.toString(projectId),
String fileName = String.format("%s/exec/process/%s/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId),Integer.toString(taskInstanceId));
File file = new File(fileName);
if (!file.getParentFile().exists()){
file.getParentFile().mkdirs();
}
return fileName;
}
/**
@ -95,15 +115,21 @@ public class FileUtils {
* @return directory of process instances
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId) {
return String.format("%s/process/%s/%s/%s", PropertyUtils.getString(PROCESS_EXEC_BASEPATH), Integer.toString(projectId),
String fileName = String.format("%s/exec/process/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId));
File file = new File(fileName);
if (!file.getParentFile().exists()){
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* @return get suffixes for resource files that support online viewing
*/
public static String getResourceViewSuffixs() {
return PropertyUtils.getString(RESOURCE_VIEW_SUFFIXS);
return PropertyUtils.getString(RESOURCE_VIEW_SUFFIXS, RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE);
}
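Hedged examples of the paths these helpers now produce under the default DATA_BASEDIR of /tmp/dolphinscheduler (the download timestamp obviously varies):
// getDownloadFilename("a.jar")          -> /tmp/dolphinscheduler/download/20200101120000/a.jar
// getUploadFilename("tenant1", "a.jar") -> /tmp/dolphinscheduler/tenant1/resources/a.jar
// getProcessExecDir(1, 2, 3, 4)         -> /tmp/dolphinscheduler/exec/process/1/2/3/4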
/**

41
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java

@ -45,6 +45,8 @@ import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;
/**
* hadoop utils
* single instance
@ -53,6 +55,10 @@ public class HadoopUtils implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder
@ -65,11 +71,11 @@ public class HadoopUtils implements Closeable {
}
});
private static volatile boolean yarnEnabled = false;
private Configuration configuration;
private FileSystem fs;
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
private HadoopUtils() {
init();
initHdfsPath();
@ -83,9 +89,9 @@ public class HadoopUtils implements Closeable {
/**
* init dolphinscheduler root path in hdfs
*/
private void initHdfsPath() {
String hdfsPath = PropertyUtils.getString(Constants.DATA_STORE_2_HDFS_BASEPATH);
Path path = new Path(hdfsPath);
Path path = new Path(resourceUploadPath);
try {
if (!fs.exists(path)) {
@ -104,11 +110,11 @@ public class HadoopUtils implements Closeable {
try {
configuration = new Configuration();
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
if (resUploadType == ResUploadType.HDFS) {
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE)) {
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false)) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
@ -162,9 +168,17 @@ public class HadoopUtils implements Closeable {
String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
if (!StringUtils.isEmpty(rmHaIds)) {
//not use resourcemanager
if (rmHaIds.contains(Constants.YARN_RESOURCEMANAGER_HA_XX)){
yarnEnabled = false;
} else if (!StringUtils.isEmpty(rmHaIds)) {
//resourcemanager HA enabled
appAddress = getAppAddress(appAddress, rmHaIds);
yarnEnabled = true;
logger.info("appAddress : {}", appAddress);
} else {
//single resourcemanager enabled
yarnEnabled = true;
}
configuration.set(Constants.YARN_APPLICATION_STATUS_ADDRESS, appAddress);
} catch (Exception e) {
@ -364,6 +378,13 @@ public class HadoopUtils implements Closeable {
return fs.rename(new Path(src), new Path(dst));
}
/**
* hadoop resourcemanager enabled or not
* @return result
*/
public boolean isYarnEnabled() {
return yarnEnabled;
}
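The three rmHaIds cases handled in init() above, summarized:
// rmHaIds contains "xx" -> placeholder value, resourcemanager treated as absent, yarnEnabled = false
// rmHaIds = "rm1,rm2"   -> HA mode, appAddress rewritten via getAppAddress(...), yarnEnabled = true
// rmHaIds empty         -> single resourcemanager, the configured appAddress is used as-is, yarnEnabled = true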
/**
* get the state of an application
@ -404,15 +425,15 @@ public class HadoopUtils implements Closeable {
}
/**
* get data hdfs path
* @return data hdfs path
*/
public static String getHdfsDataBasePath() {
String basePath = PropertyUtils.getString(Constants.DATA_STORE_2_HDFS_BASEPATH);
if ("/".equals(basePath)) {
if ("/".equals(resourceUploadPath)) {
// if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
return "";
} else {
return basePath;
return resourceUploadPath;
}
}
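Taken together, the new branch in init() and the isYarnEnabled() getter implement a three-way decision on yarn.resourcemanager.ha.rm.ids. A minimal standalone sketch of that decision, assuming Constants.YARN_RESOURCEMANAGER_HA_XX resolves to the literal placeholder "xx" (the class below is illustrative, not part of the patch):

public class YarnEnabledSketch {
    // assumption: Constants.YARN_RESOURCEMANAGER_HA_XX is the literal "xx"
    private static final String HA_PLACEHOLDER = "xx";

    static boolean resolveYarnEnabled(String rmHaIds) {
        // null guard added for the sketch; the patch reads the value from properties
        if (rmHaIds == null || rmHaIds.contains(HA_PLACEHOLDER)) {
            return false;   // resourcemanager is not used
        } else if (!rmHaIds.isEmpty()) {
            return true;    // resourcemanager HA enabled
        } else {
            return true;    // single resourcemanager
        }
    }

    public static void main(String[] args) {
        System.out.println(resolveYarnEnabled("192.168.xx.xx,192.168.xx.xx")); // false: placeholder ips
        System.out.println(resolveYarnEnabled("rm1,rm2"));                     // true: HA
        System.out.println(resolveYarnEnabled(""));                            // true: single
    }
}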

2
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java

@ -416,6 +416,8 @@ public class OSUtils {
/**
* check memory and cpu usage
* @param systemCpuLoad systemCpuLoad
* @param systemReservedMemory systemReservedMemory
* @return whether memory and cpu usage are within limits
*/
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory){

20
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/Preconditions.java

@ -34,10 +34,9 @@ public final class Preconditions {
* Ensures that the given object reference is not null.
* Upon violation, a {@code NullPointerException} with no message is thrown.
*
* @param reference The object reference
* @return The object reference itself (generically typed).
*
* @throws NullPointerException Thrown, if the passed reference was null.
* @param reference reference
* @param <T> T
* @return T
*/
public static <T> T checkNotNull(T reference) {
if (reference == null) {
@ -49,12 +48,10 @@ public final class Preconditions {
/**
* Ensures that the given object reference is not null.
* Upon violation, a {@code NullPointerException} with the given message is thrown.
*
* @param reference The object reference
* @param errorMessage The message for the {@code NullPointerException} that is thrown if the check fails.
* @return The object reference itself (generically typed).
*
* @throws NullPointerException Thrown, if the passed reference was null.
* @param reference reference
* @param errorMessage errorMessage
* @param <T> T
* @return T
*/
public static <T> T checkNotNull(T reference, String errorMessage) {
if (reference == null) {
@ -78,9 +75,8 @@ public final class Preconditions {
* @param errorMessageArgs The arguments for the error message, to be inserted into the
* message template for the {@code %s} placeholders.
*
* @param <T> T
* @return The object reference itself (generically typed).
*
* @throws NullPointerException Thrown, if the passed reference was null.
*/
public static <T> T checkNotNull(T reference,
String errorMessageTemplate,

32
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/PropertyUtils.java

@ -71,8 +71,8 @@ public class PropertyUtils {
*
* @return whether resource upload is enabled at startup
*/
public static boolean getResUploadStartupState(){
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
public static Boolean getResUploadStartupState(){
String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
return resUploadType == ResUploadType.HDFS || resUploadType == ResUploadType.S3;
}
@ -87,6 +87,18 @@ public class PropertyUtils {
return properties.getProperty(key.trim());
}
/**
* get property value
*
* @param key property name
* @param defaultVal default value
* @return property value
*/
public static String getString(String key, String defaultVal) {
String val = properties.getProperty(key.trim());
return val == null ? defaultVal : val;
}
/**
* get property value
*
@ -132,6 +144,22 @@ public class PropertyUtils {
return false;
}
/**
* get property value
*
* @param key property name
* @param defaultValue default value
* @return property value
*/
public static Boolean getBoolean(String key, boolean defaultValue) {
String value = properties.getProperty(key.trim());
if(null != value){
return Boolean.parseBoolean(value);
}
return defaultValue;
}
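As a quick usage sketch of the two getters added above (the key strings below come from common.properties elsewhere in this diff):

import org.apache.dolphinscheduler.common.utils.PropertyUtils;

public class PropertyDefaultsExample {
    public static void main(String[] args) {
        // falls back to the second argument when the key is absent
        String uploadPath = PropertyUtils.getString("resource.upload.path", "/dolphinscheduler");
        Boolean kerberosOn = PropertyUtils.getBoolean("hadoop.security.authentication.startup.state", false);
        System.out.println(uploadPath + ", kerberos=" + kerberosOn);
    }
}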
/**
* get property long value
* @param key key

51
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/ResInfo.java

@ -89,45 +89,6 @@ public class ResInfo {
}
/**
* get heart beat info
* @param now now
* @return heart beat info
*/
public static String getHeartBeatInfo(Date now){
return buildHeartbeatForZKInfo(OSUtils.getHost(),
OSUtils.getProcessID(),
OSUtils.cpuUsage(),
OSUtils.memoryUsage(),
OSUtils.loadAverage(),
DateUtils.dateToString(now),
DateUtils.dateToString(now));
}
/**
* build heartbeat info for zk
* @param host host
* @param port port
* @param cpuUsage cpu usage
* @param memoryUsage memory usage
* @param loadAverage load average
* @param createTime create time
* @param lastHeartbeatTime last heartbeat time
* @return heartbeat info
*/
public static String buildHeartbeatForZKInfo(String host , int port ,
double cpuUsage , double memoryUsage,double loadAverage,
String createTime,String lastHeartbeatTime){
return host + Constants.COMMA + port + Constants.COMMA
+ cpuUsage + Constants.COMMA
+ memoryUsage + Constants.COMMA
+ loadAverage + Constants.COMMA
+ createTime + Constants.COMMA
+ lastHeartbeatTime;
}
/**
* parse heartbeat info for zk
* @param heartBeatInfo heartbeat info
@ -143,13 +104,11 @@ public class ResInfo {
}
Server masterServer = new Server();
masterServer.setHost(masterArray[0]);
masterServer.setPort(Integer.parseInt(masterArray[1]));
masterServer.setResInfo(getResInfoJson(Double.parseDouble(masterArray[2]),
Double.parseDouble(masterArray[3]),
Double.parseDouble(masterArray[4])));
masterServer.setCreateTime(DateUtils.stringToDate(masterArray[5]));
masterServer.setLastHeartbeatTime(DateUtils.stringToDate(masterArray[6]));
masterServer.setResInfo(getResInfoJson(Double.parseDouble(masterArray[0]),
Double.parseDouble(masterArray[1]),
Double.parseDouble(masterArray[2])));
masterServer.setCreateTime(DateUtils.stringToDate(masterArray[3]));
masterServer.setLastHeartbeatTime(DateUtils.stringToDate(masterArray[4]));
return masterServer;
}
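The index shift above suggests the heartbeat payload parsed here no longer leads with host and port; under that assumption the remaining fields are cpuUsage, memoryUsage, loadAverage, createTime and lastHeartbeatTime. A tiny parsing sketch of the assumed layout (values are made up):

public class HeartbeatParseSketch {
    public static void main(String[] args) {
        // assumed post-patch payload: cpuUsage,memoryUsage,loadAverage,createTime,lastHeartbeatTime
        String heartBeat = "0.35,0.61,1.5,2020-04-01 10:00:00,2020-04-01 10:00:05";
        String[] parts = heartBeat.split(",");
        System.out.printf("cpu=%s mem=%s load=%s create=%s last=%s%n",
                parts[0], parts[1], parts[2], parts[3], parts[4]);
    }
}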

51
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/dependent/DependentDateUtils.java

@ -27,9 +27,9 @@ public class DependentDateUtils {
/**
* get last day interval list
* @param businessDate
* @param hourNumber
* @return
* @param businessDate businessDate
* @param hourNumber hourNumber
* @return DateInterval list
*/
public static List<DateInterval> getLastHoursInterval(Date businessDate, int hourNumber){
List<DateInterval> dateIntervals = new ArrayList<>();
@ -44,8 +44,8 @@ public class DependentDateUtils {
/**
* get today day interval list
* @param businessDate
* @return
* @param businessDate businessDate
* @return DateInterval list
*/
public static List<DateInterval> getTodayInterval(Date businessDate){
@ -59,9 +59,9 @@ public class DependentDateUtils {
/**
* get last day interval list
* @param businessDate
* @param someDay
* @return
* @param businessDate businessDate
* @param someDay someDay
* @return DateInterval list
*/
public static List<DateInterval> getLastDayInterval(Date businessDate, int someDay){
@ -78,8 +78,8 @@ public class DependentDateUtils {
/**
* get interval between this month first day and businessDate
* @param businessDate
* @return
* @param businessDate businessDate
* @return DateInterval list
*/
public static List<DateInterval> getThisMonthInterval(Date businessDate) {
Date firstDay = DateUtils.getFirstDayOfMonth(businessDate);
@ -88,8 +88,8 @@ public class DependentDateUtils {
/**
* get interval between last month first day and last day
* @param businessDate
* @return
* @param businessDate businessDate
* @return DateInterval list
*/
public static List<DateInterval> getLastMonthInterval(Date businessDate) {
@ -102,11 +102,12 @@ public class DependentDateUtils {
/**
* get interval on first/last day of the last month
* @param businessDate
* @param isBeginDay
* @return
* @param businessDate businessDate
* @param isBeginDay isBeginDay
* @return DateInterval list
*/
public static List<DateInterval> getLastMonthBeginInterval(Date businessDate, boolean isBeginDay) {
public static List<DateInterval> getLastMonthBeginInterval(Date businessDate,
boolean isBeginDay) {
Date firstDayThisMonth = DateUtils.getFirstDayOfMonth(businessDate);
Date lastDay = DateUtils.getSomeDay(firstDayThisMonth, -1);
@ -120,8 +121,8 @@ public class DependentDateUtils {
/**
* get interval between monday to businessDate of this week
* @param businessDate
* @return
* @param businessDate businessDate
* @return DateInterval list
*/
public static List<DateInterval> getThisWeekInterval(Date businessDate) {
Date mondayThisWeek = DateUtils.getMonday(businessDate);
@ -131,8 +132,8 @@ public class DependentDateUtils {
/**
* get interval between monday to sunday of last week
* default set monday the first day of week
* @param businessDate
* @return
* @param businessDate businessDate
* @return DateInterval list
*/
public static List<DateInterval> getLastWeekInterval(Date businessDate) {
Date mondayThisWeek = DateUtils.getMonday(businessDate);
@ -144,9 +145,9 @@ public class DependentDateUtils {
/**
* get interval on the day of last week
* default set monday the first day of week
* @param businessDate
* @param businessDate businessDate
* @param dayOfWeek monday:1,tuesday:2,wednesday:3,thursday:4,friday:5,saturday:6,sunday:7
* @return
* @return DateInterval list
*/
public static List<DateInterval> getLastWeekOneDayInterval(Date businessDate, int dayOfWeek) {
Date mondayThisWeek = DateUtils.getMonday(businessDate);
@ -156,6 +157,12 @@ public class DependentDateUtils {
return getDateIntervalListBetweenTwoDates(destDay, destDay);
}
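All of the interval helpers in this file funnel into getDateIntervalListBetweenTwoDates, documented just below. A hedged usage sketch, reusing the DateUtils.parse call seen in the tests later in this diff (the DateInterval import path is an assumption):

import org.apache.dolphinscheduler.common.model.DateInterval; // assumed package
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.dependent.DependentDateUtils;
import java.util.Date;
import java.util.List;

public class DependentIntervalExample {
    public static void main(String[] args) {
        Date businessDate = DateUtils.parse("20170101010101", "yyyyMMddHHmmss");
        // intervals covering the last 3 days before the business date
        List<DateInterval> intervals = DependentDateUtils.getLastDayInterval(businessDate, 3);
        System.out.println("intervals: " + intervals.size());
    }
}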
/**
* get date interval list between two dates
* @param firstDay firstDay
* @param lastDay lastDay
* @return DateInterval list
*/
public static List<DateInterval> getDateIntervalListBetweenTwoDates(Date firstDay, Date lastDay) {
List<DateInterval> dateIntervals = new ArrayList<>();
while(!firstDay.after(lastDay)){

10
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/placeholder/PlaceholderUtils.java

@ -36,6 +36,7 @@ public class PlaceholderUtils {
/**
* The suffix of the position to be replaced
*/
public static final String PLACEHOLDER_SUFFIX = "}";
@ -43,11 +44,14 @@ public class PlaceholderUtils {
* Replaces all placeholders of format {@code ${name}} with the value returned
* from the supplied {@link PropertyPlaceholderHelper.PlaceholderResolver}.
*
* @param value the value containing the placeholders to be replaced
* @param paramsMap placeholder data dictionary
* @param value the value containing the placeholders to be replaced
* @param paramsMap placeholder data dictionary
* @param ignoreUnresolvablePlaceholders ignoreUnresolvablePlaceholders
* @return the supplied value with placeholders replaced inline
*/
public static String replacePlaceholders(String value, Map<String, String> paramsMap, boolean ignoreUnresolvablePlaceholders) {
public static String replacePlaceholders(String value,
Map<String, String> paramsMap,
boolean ignoreUnresolvablePlaceholders) {
//replacement tool: each parameter key is replaced by its value; if a key cannot be matched, an exception is thrown
PropertyPlaceholderHelper strictHelper = getPropertyPlaceholderHelper(false);
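A hedged usage sketch of the widened signature; the unresolved-placeholder behavior is inferred from the comment above:

import org.apache.dolphinscheduler.common.utils.placeholder.PlaceholderUtils;
import java.util.HashMap;
import java.util.Map;

public class PlaceholderExample {
    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("name", "dolphinscheduler");
        // with ignoreUnresolvablePlaceholders=true, an unmatched ${...} should be
        // left in place rather than raising an exception (inferred behavior)
        String out = PlaceholderUtils.replacePlaceholders("hello ${name}, missing ${other}", params, true);
        System.out.println(out);
    }
}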

80
dolphinscheduler-common/src/main/resources/common.properties

@ -15,80 +15,52 @@
# limitations under the License.
#
#task queue implementation, default "zookeeper"
dolphinscheduler.queue.impl=zookeeper
# resource storage type : HDFS,S3,NONE
resource.storage.type=NONE
#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
zookeeper.quorum=localhost:2181
#dolphinscheduler root directory
zookeeper.dolphinscheduler.root=/dolphinscheduler
#dolphinscheduler failover directory
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.base.sleep=100
zookeeper.retry.max.sleep=30000
zookeeper.retry.maxtime=5
# resource upload startup type : HDFS,S3,NONE
res.upload.startup.type=NONE
# Users who have permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# data base dir; resource files will be stored under this hadoop hdfs path. Please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
data.store2hdfs.basepath=/dolphinscheduler
# user data directory path, please make sure the directory exists and has read/write permissions
data.basedir.path=/tmp/dolphinscheduler
# directory path for user data download, please make sure the directory exists and has read/write permissions
data.download.basedir.path=/tmp/dolphinscheduler/download
# process execute directory, please make sure the directory exists and has read/write permissions
process.exec.basepath=/tmp/dolphinscheduler/exec
# resource store on HDFS/S3 path; resource files will be stored under this hadoop hdfs path. Please make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended
#resource.upload.path=/dolphinscheduler
# user data local directory path, please make sure the directory exists and has read/write permissions
#data.basedir.path=/tmp/dolphinscheduler
# whether kerberos starts
hadoop.security.authentication.startup.state=false
#hadoop.security.authentication.startup.state=false
# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf
#java.security.krb5.conf.path=/opt/krb5.conf
# loginUserFromKeytab user
login.user.keytab.username=hdfs-mycluster@ESZ.COM
#login.user.keytab.username=hdfs-mycluster@ESZ.COM
# loginUserFromKeytab path
login.user.keytab.path=/opt/hdfs.headless.keytab
# system env path. please make sure the directory and file exist and have read/write/execute permissions
dolphinscheduler.env.path=/opt/dolphinscheduler_env.sh
#login.user.keytab.path=/opt/hdfs.headless.keytab
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
# is development state? default "false"
development.state=true
#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
# if resource.storage.type=HDFS, the user needs permission to create directories under the HDFS root path
hdfs.root.user=hdfs
# HA or single namenode. If namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml
# to the conf directory; s3 is supported, for example: s3a://dolphinscheduler
# if resource.storage.type=S3, the value looks like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, copy core-site.xml and hdfs-site.xml to the conf dir
fs.defaultFS=hdfs://mycluster:8020
# required for s3: s3 endpoint
fs.s3a.endpoint=http://192.168.199.91:9010
# if resource.storage.type=S3, s3 endpoint
#fs.s3a.endpoint=http://192.168.199.91:9010
# required for s3: s3 access key
fs.s3a.access.key=A3DXS30FO22544RE
# if resource.storage.type=S3, s3 access key
#fs.s3a.access.key=A3DXS30FO22544RE
# required for s3: s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
# if resource.storage.type=S3, s3 secret key
#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
# resourcemanager HA: note this needs ips; leave this empty if single
# if hadoop resourcemanager is not used, keep the default value; if resourcemanager HA is enabled, enter the HA ips; if resourcemanager is single, leave this value empty TODO
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
# If resourcemanager HA is enabled or resourcemanager is not used, keep the default value; if resourcemanager is single, you only need to replace ark1 with the actual resourcemanager hostname.
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
kerberos.expire.time=7
# system env path. please make sure the directory and file exist and have read/write/execute permissions, TODO
#dolphinscheduler.env.path=env/dolphinscheduler_env.sh
kerberos.expire.time=7
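Putting the renamed keys together, a minimal HDFS-enabled common.properties under the new scheme might look like the following (hostnames and paths are placeholders, not recommendations):

# illustrative values only
resource.storage.type=HDFS
resource.upload.path=/dolphinscheduler
hdfs.root.user=hdfs
fs.defaultFS=hdfs://mycluster:8020
hadoop.security.authentication.startup.state=false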

169
dolphinscheduler-common/src/main/resources/logback.xml

@ -1,169 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- master server logback config start -->
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-master.log</file>
<!--<filter class="org.apache.dolphinscheduler.common.log.MasterLogFilter">
<level>INFO</level>
</filter>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- master server logback config end -->
<!-- worker server logback config start -->
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.common.log.SensitiveDataConverter"/>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.common.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.common.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-worker.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.common.log.WorkerLogFilter"/>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- worker server logback config end -->
<!-- alert server logback config start -->
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- alert server logback config end -->
<!-- api server logback config start -->
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-api-server.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config end -->
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<if condition='p("server").contains("master-server")'>
<then>
<appender-ref ref="MASTERLOGFILE"/>
</then>
</if>
<if condition='p("server").contains("worker-server")'>
<then>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</then>
</if>
<if condition='p("server").contains("alert-server")'>
<then>
<appender-ref ref="ALERTLOGFILE"/>
</then>
</if>
<if condition='p("server").contains("api-server")'>
<then>
<appender-ref ref="APILOGFILE"/>
</then>
</if>
</root>
</configuration>

5
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/CommonUtilsTest.java

@ -35,11 +35,6 @@ public class CommonUtilsTest {
Assert.assertTrue(true);
}
@Test
public void getQueueImplValue(){
logger.info(CommonUtils.getQueueImplValue());
Assert.assertTrue(true);
}
@Test
public void isDevelopMode() {
logger.info("develop mode: {}",CommonUtils.isDevelopMode());
Assert.assertTrue(true);

16
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java

@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
@ -85,4 +86,19 @@ public class HadoopUtilsTest {
List<String> stringList = HadoopUtils.getInstance().catFile("/dolphinscheduler/hdfs/resources/WCSparkPython.py", 0, 1000);
logger.info(String.join(",",stringList));
}
@Test
public void getHdfsFileNameTest(){
logger.info(HadoopUtils.getHdfsFileName(ResourceType.FILE,"test","/test"));
}
@Test
public void getHdfsResourceFileNameTest(){
logger.info(HadoopUtils.getHdfsResourceFileName("test","/test"));
}
@Test
public void getHdfsUdfFileNameTest(){
logger.info(HadoopUtils.getHdfsUdfFileName("test","/test.jar"));
}
}

64
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/placeholder/TimePlaceholderUtilsTest.java

@ -32,37 +32,37 @@ public class TimePlaceholderUtilsTest {
date = DateUtils.parse("20170101010101","yyyyMMddHHmmss");
}
@Test
public void replacePlaceholdersT() {
Assert.assertEquals("2017test12017:***2016-12-31,20170102,20170130,20161227,20161231", TimePlaceholderUtils.replacePlaceholders("$[yyyy]test1$[yyyy:***]$[yyyy-MM-dd-1],$[month_begin(yyyyMMdd, 1)],$[month_end(yyyyMMdd, -1)],$[week_begin(yyyyMMdd, 1)],$[week_end(yyyyMMdd, -1)]",
date, true));
Assert.assertEquals("1483200061,1483290061,1485709261,1482771661,1483113600,1483203661", TimePlaceholderUtils.replacePlaceholders("$[timestamp(yyyyMMdd00mmss)],"
+ "$[timestamp(month_begin(yyyyMMddHHmmss, 1))],"
+ "$[timestamp(month_end(yyyyMMddHHmmss, -1))],"
+ "$[timestamp(week_begin(yyyyMMddHHmmss, 1))],"
+ "$[timestamp(week_end(yyyyMMdd000000, -1))],"
+ "$[timestamp(yyyyMMddHHmmss)]",
date, true));
}
@Test
public void calcMinutesT() {
Assert.assertEquals("Sun Jan 01 01:01:01 CST 2017=yyyy", TimePlaceholderUtils.calcMinutes("yyyy", date).toString());
Assert.assertEquals("Sun Jan 08 01:01:01 CST 2017=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd+7*1", date).toString());
Assert.assertEquals("Sun Dec 25 01:01:01 CST 2016=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd-7*1", date).toString());
Assert.assertEquals("Mon Jan 02 01:01:01 CST 2017=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd+1", date).toString());
Assert.assertEquals("Sat Dec 31 01:01:01 CST 2016=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd-1", date).toString());
Assert.assertEquals("Sun Jan 01 02:01:01 CST 2017=yyyyMMddHH", TimePlaceholderUtils.calcMinutes("yyyyMMddHH+1/24", date).toString());
Assert.assertEquals("Sun Jan 01 00:01:01 CST 2017=yyyyMMddHH", TimePlaceholderUtils.calcMinutes("yyyyMMddHH-1/24", date).toString());
}
@Test
public void calcMonthsT() {
Assert.assertEquals("Mon Jan 01 01:01:01 CST 2018=yyyyMMdd", TimePlaceholderUtils.calcMonths("add_months(yyyyMMdd,12*1)", date).toString());
Assert.assertEquals("Fri Jan 01 01:01:01 CST 2016=yyyyMMdd", TimePlaceholderUtils.calcMonths("add_months(yyyyMMdd,-12*1)", date).toString());
}
// @Test
// public void replacePlaceholdersT() {
// Assert.assertEquals("2017test12017:***2016-12-31,20170102,20170130,20161227,20161231", TimePlaceholderUtils.replacePlaceholders("$[yyyy]test1$[yyyy:***]$[yyyy-MM-dd-1],$[month_begin(yyyyMMdd, 1)],$[month_end(yyyyMMdd, -1)],$[week_begin(yyyyMMdd, 1)],$[week_end(yyyyMMdd, -1)]",
// date, true));
//
// Assert.assertEquals("1483200061,1483290061,1485709261,1482771661,1483113600,1483203661", TimePlaceholderUtils.replacePlaceholders("$[timestamp(yyyyMMdd00mmss)],"
// + "$[timestamp(month_begin(yyyyMMddHHmmss, 1))],"
// + "$[timestamp(month_end(yyyyMMddHHmmss, -1))],"
// + "$[timestamp(week_begin(yyyyMMddHHmmss, 1))],"
// + "$[timestamp(week_end(yyyyMMdd000000, -1))],"
// + "$[timestamp(yyyyMMddHHmmss)]",
// date, true));
// }
//
//
//
// @Test
// public void calcMinutesT() {
// Assert.assertEquals("Sun Jan 01 01:01:01 CST 2017=yyyy", TimePlaceholderUtils.calcMinutes("yyyy", date).toString());
// Assert.assertEquals("Sun Jan 08 01:01:01 CST 2017=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd+7*1", date).toString());
// Assert.assertEquals("Sun Dec 25 01:01:01 CST 2016=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd-7*1", date).toString());
// Assert.assertEquals("Mon Jan 02 01:01:01 CST 2017=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd+1", date).toString());
// Assert.assertEquals("Sat Dec 31 01:01:01 CST 2016=yyyyMMdd", TimePlaceholderUtils.calcMinutes("yyyyMMdd-1", date).toString());
// Assert.assertEquals("Sun Jan 01 02:01:01 CST 2017=yyyyMMddHH", TimePlaceholderUtils.calcMinutes("yyyyMMddHH+1/24", date).toString());
// Assert.assertEquals("Sun Jan 01 00:01:01 CST 2017=yyyyMMddHH", TimePlaceholderUtils.calcMinutes("yyyyMMddHH-1/24", date).toString());
// }
//
// @Test
// public void calcMonthsT() {
// Assert.assertEquals("Mon Jan 01 01:01:01 CST 2018=yyyyMMdd", TimePlaceholderUtils.calcMonths("add_months(yyyyMMdd,12*1)", date).toString());
// Assert.assertEquals("Fri Jan 01 01:01:01 CST 2016=yyyyMMdd", TimePlaceholderUtils.calcMonths("add_months(yyyyMMdd,-12*1)", date).toString());
// }
}

5
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java

@ -135,11 +135,14 @@ public class AlertDao extends AbstractBaseDao {
alertMapper.insert(alert);
}
/**
* task timeout warn
* @param alertgroupId alertgroupId
* @param receivers receivers
* @param receiversCc receiversCc
* @param processInstanceId processInstanceId
* @param processInstanceName processInstanceName
* @param taskId taskId
* @param taskName taskName
*/
@ -171,7 +174,7 @@ public class AlertDao extends AbstractBaseDao {
/**
* for test
* @return
* @return AlertMapper
*/
public AlertMapper getAlertMapper() {
return alertMapper;

34
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/TaskRecordDao.java

@ -16,6 +16,9 @@
*/
package org.apache.dolphinscheduler.dao;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.TaskRecordStatus;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
@ -23,9 +26,7 @@ import org.apache.dolphinscheduler.common.utils.ConnectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.TaskRecord;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.dolphinscheduler.dao.utils.PropertyUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -44,26 +45,11 @@ public class TaskRecordDao {
private static Logger logger = LoggerFactory.getLogger(TaskRecordDao.class.getName());
/**
* load conf
*/
private static Configuration conf;
static {
try {
conf = new PropertiesConfiguration(Constants.APPLICATION_PROPERTIES);
} catch (ConfigurationException e) {
logger.error("load configuration exception", e);
System.exit(1);
}
}
/**
* get task record flag
*
* get task record flag
* @return whether startup taskrecord
*/
public static boolean getTaskRecordFlag() {
return conf.getBoolean(Constants.TASK_RECORD_FLAG);
public static boolean getTaskRecordFlag(){
return PropertyUtils.getBoolean(Constants.TASK_RECORD_FLAG,false);
}
/**
@ -76,9 +62,9 @@ public class TaskRecordDao {
return null;
}
String driver = "com.mysql.jdbc.Driver";
String url = conf.getString(Constants.TASK_RECORD_URL);
String username = conf.getString(Constants.TASK_RECORD_USER);
String password = conf.getString(Constants.TASK_RECORD_PWD);
String url = PropertyUtils.getString(Constants.TASK_RECORD_URL);
String username = PropertyUtils.getString(Constants.TASK_RECORD_USER);
String password = PropertyUtils.getString(Constants.TASK_RECORD_PWD);
Connection conn = null;
try {
//classLoader, load driver
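With the static Configuration block gone, a missing properties file no longer kills the JVM at class-load time; callers simply see the false default. A caller-side sketch of the refactored flag (illustrative only):

import org.apache.dolphinscheduler.dao.TaskRecordDao;

public class TaskRecordFlagExample {
    public static void main(String[] args) {
        // after the patch, an unset task record flag falls back to false
        if (TaskRecordDao.getTaskRecordFlag()) {
            // ... query the task record store here
        } else {
            System.out.println("task record disabled");
        }
    }
}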

1
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/BaseDataSource.java

@ -81,6 +81,7 @@ public abstract class BaseDataSource {
/**
* gets the JDBC url for the data source connection
* @return getJdbcUrl
*/
public String getJdbcUrl() {
StringBuilder jdbcUrl = new StringBuilder(getAddress());

27
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/ConnectionFactory.java

@ -83,32 +83,7 @@ public class ConnectionFactory extends SpringConnectionFactory {
*/
private DataSource buildDataSource() {
DruidDataSource druidDataSource = new DruidDataSource();
druidDataSource.setDriverClassName(conf.getString(Constants.SPRING_DATASOURCE_DRIVER_CLASS_NAME));
druidDataSource.setUrl(conf.getString(Constants.SPRING_DATASOURCE_URL));
druidDataSource.setUsername(conf.getString(Constants.SPRING_DATASOURCE_USERNAME));
druidDataSource.setPassword(conf.getString(Constants.SPRING_DATASOURCE_PASSWORD));
druidDataSource.setValidationQuery(conf.getString(Constants.SPRING_DATASOURCE_VALIDATION_QUERY));
druidDataSource.setPoolPreparedStatements(conf.getBoolean(Constants.SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS));
druidDataSource.setTestWhileIdle(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_WHILE_IDLE));
druidDataSource.setTestOnBorrow(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_BORROW));
druidDataSource.setTestOnReturn(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_RETURN));
druidDataSource.setKeepAlive(conf.getBoolean(Constants.SPRING_DATASOURCE_KEEP_ALIVE));
druidDataSource.setMinIdle(conf.getInt(Constants.SPRING_DATASOURCE_MIN_IDLE));
druidDataSource.setMaxActive(conf.getInt(Constants.SPRING_DATASOURCE_MAX_ACTIVE));
druidDataSource.setMaxWait(conf.getInt(Constants.SPRING_DATASOURCE_MAX_WAIT));
druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(conf.getInt(Constants.SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE));
druidDataSource.setInitialSize(conf.getInt(Constants.SPRING_DATASOURCE_INITIAL_SIZE));
druidDataSource.setTimeBetweenEvictionRunsMillis(conf.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS));
druidDataSource.setTimeBetweenConnectErrorMillis(conf.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS));
druidDataSource.setMinEvictableIdleTimeMillis(conf.getLong(Constants.SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS));
druidDataSource.setValidationQueryTimeout(conf.getInt(Constants.SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT));
//auto commit
druidDataSource.setDefaultAutoCommit(conf.getBoolean(Constants.SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT));
DruidDataSource druidDataSource = dataSource();
return druidDataSource;
}

3
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/DB2ServerDataSource.java

@ -25,7 +25,8 @@ import org.apache.dolphinscheduler.common.enums.DbType;
public class DB2ServerDataSource extends BaseDataSource {
/**
* @return driver class
* gets the JDBC url for the data source connection
* @return jdbc url
*/
@Override
public String driverClassSelector() {

6
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/DataSourceFactory.java

@ -29,6 +29,12 @@ public class DataSourceFactory {
private static final Logger logger = LoggerFactory.getLogger(DataSourceFactory.class);
/**
* getDatasource
* @param dbType dbType
* @param parameter parameter
* @return getDatasource
*/
public static BaseDataSource getDatasource(DbType dbType, String parameter) {
try {
switch (dbType) {

3
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/HiveDataSource.java

@ -25,7 +25,8 @@ import org.apache.dolphinscheduler.common.enums.DbType;
public class HiveDataSource extends BaseDataSource {
/**
* @return driver class
* gets the JDBC url for the data source connection
* @return jdbc url
*/
@Override
public String driverClassSelector() {

3
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/MySQLDataSource.java

@ -25,7 +25,8 @@ import org.apache.dolphinscheduler.common.enums.DbType;
public class MySQLDataSource extends BaseDataSource {
/**
* @return driver class
* gets the JDBC url for the data source connection
* @return jdbc url
*/
@Override
public String driverClassSelector() {

16
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java

@ -27,8 +27,6 @@ import org.slf4j.LoggerFactory;
*/
public class OracleDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(OracleDataSource.class);
private DbConnectType type;
public DbConnectType getType() {
@ -47,6 +45,19 @@ public class OracleDataSource extends BaseDataSource {
return Constants.COM_ORACLE_JDBC_DRIVER;
}
/**
* gets the JDBC url for the data source connection
* @return jdbc url
*/
@Override
public String getJdbcUrl() {
String jdbcUrl = getAddress();
if (jdbcUrl.lastIndexOf("/") != (jdbcUrl.length() - 1)) {
jdbcUrl += "/";
}
return jdbcUrl;
}
/**
* @return db type
*/
@ -54,4 +65,5 @@ public class OracleDataSource extends BaseDataSource {
public DbType dbTypeSelector() {
return DbType.ORACLE;
}
}

3
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/PostgreDataSource.java

@ -25,7 +25,8 @@ import org.apache.dolphinscheduler.common.enums.DbType;
public class PostgreDataSource extends BaseDataSource {
/**
* @return driver class
* gets the JDBC url for the data source connection
* @return jdbc url
*/
@Override
public String driverClassSelector() {

46
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SQLServerDataSource.java

@ -18,12 +18,58 @@ package org.apache.dolphinscheduler.dao.datasource;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
* data source of SQL Server
*/
public class SQLServerDataSource extends BaseDataSource {
private static final Logger logger = LoggerFactory.getLogger(SQLServerDataSource.class);
/**
* gets the JDBC url for the data source connection
* @return jdbc url
*/
@Override
public String getJdbcUrl() {
String jdbcUrl = getAddress();
jdbcUrl += ";databaseName=" + getDatabase();
if (StringUtils.isNotEmpty(getOther())) {
jdbcUrl += ";" + getOther();
}
return jdbcUrl;
}
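For reference, a small sketch mirroring the getJdbcUrl() concatenation above with illustrative values (the address and extras are placeholders):

public class SqlServerUrlExample {
    public static void main(String[] args) {
        String address = "jdbc:sqlserver://192.168.xx.xx:1433"; // placeholder host
        String database = "dolphinscheduler";
        String other = "useUnicode=true";                       // optional extras
        String jdbcUrl = address + ";databaseName=" + database;
        if (!other.isEmpty()) {
            jdbcUrl += ";" + other;
        }
        // -> jdbc:sqlserver://192.168.xx.xx:1433;databaseName=dolphinscheduler;useUnicode=true
        System.out.println(jdbcUrl);
    }
}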
/**
* test whether the data source can be connected successfully
*/
@Override
public void isConnectable() {
Connection con = null;
try {
Class.forName(Constants.COM_SQLSERVER_JDBC_DRIVER);
con = DriverManager.getConnection(getJdbcUrl(), getUser(), getPassword());
} catch (Exception e) {
logger.error("error", e);
} finally {
if (con != null) {
try {
con.close();
} catch (SQLException e) {
logger.error("SQL Server datasource try conn close conn error", e);
}
}
}
}
/**
* @return driver class
*/

3
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/SparkDataSource.java

@ -25,7 +25,8 @@ import org.apache.dolphinscheduler.common.enums.DbType;
public class SparkDataSource extends BaseDataSource {
/**
* @return driver class
* gets the JDBC url for the data source connection
* @return jdbc url
*/
@Override
public String driverClassSelector() {

Some files were not shown because too many files have changed in this diff.
