diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
index fe83fa1d36..e8447fa079 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
@@ -103,6 +103,11 @@ public final class Constants {
      */
     public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
 
+    /**
+     * yarn.job.history.status.address
+     */
+    public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address";
+
     /**
      * hdfs configuration
      * hdfs.root.user
@@ -792,6 +797,10 @@ public final class Constants {
      */
     public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
 
+    /**
+     * com.amazonaws.services.s3.enableV4
+     */
+    public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";
 
     /**
      * loginUserFromKeytab user
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
index ffead0b298..667838d28c 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
@@ -70,6 +70,7 @@ public class HadoopUtils implements Closeable {
     public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
     public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
     public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
+    public static final String jobHistoryAddress = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
 
     private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
 
@@ -122,7 +123,7 @@ public class HadoopUtils implements Closeable {
         try {
             configuration = new HdfsConfiguration();
 
-            String resourceStorageType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
+            String resourceStorageType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
             ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);
 
             if (resUploadType == ResUploadType.HDFS){
@@ -155,22 +156,18 @@ public class HadoopUtils implements Closeable {
                     logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
                 }
 
-                if (fs == null) {
                 if (StringUtils.isNotEmpty(hdfsUser)) {
                     UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
-                        ugi.doAs(new PrivilegedExceptionAction() {
-                            @Override
-                            public Boolean run() throws Exception {
+                    ugi.doAs((PrivilegedExceptionAction) () -> {
                         fs = FileSystem.get(configuration);
                         return true;
-                            }
                     });
                 } else {
                     logger.warn("hdfs.root.user is not set value!");
                     fs = FileSystem.get(configuration);
                 }
-                }
             } else if (resUploadType == ResUploadType.S3) {
+                System.setProperty(Constants.AWS_S3_V4, Constants.STRING_TRUE);
                 configuration.set(Constants.FS_DEFAULTFS, PropertyUtils.getString(Constants.FS_DEFAULTFS));
                 configuration.set(Constants.FS_S3A_ENDPOINT, PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
                 configuration.set(Constants.FS_S3A_ACCESS_KEY, PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
@@ -204,23 +201,23 @@ public class HadoopUtils implements Closeable {
          * if rmHaIds is empty, single resourcemanager enabled
          * if rmHaIds not empty: resourcemanager HA enabled
          */
 
-        String appUrl = "";
-        if (StringUtils.isEmpty(rmHaIds)) {
-            //single resourcemanager enabled
-            appUrl = appAddress;
-            yarnEnabled = true;
-        } else {
-            //resourcemanager HA enabled
-            appUrl = getAppAddress(appAddress, rmHaIds);
         yarnEnabled = true;
-            logger.info("application url : {}", appUrl);
-        }
-
+        String appUrl = StringUtils.isEmpty(rmHaIds) ? appAddress : getAppAddress(appAddress, rmHaIds);
         if (StringUtils.isBlank(appUrl)) {
-            throw new Exception("application url is blank");
+            throw new Exception("yarn application url generation failed");
+        }
+        if (logger.isDebugEnabled()) {
+            logger.debug("yarn application url:{}, applicationId:{}", appUrl, applicationId);
         }
-        return String.format(appUrl, applicationId);
+        String activeResourceManagerPort = String.valueOf(PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088));
+        return String.format(appUrl, activeResourceManagerPort, applicationId);
+    }
+
+    public String getJobHistoryUrl(String applicationId) {
+        //eg:application_1587475402360_712719 -> job_1587475402360_712719
+        String jobId = applicationId.replace("application", "job");
+        return String.format(jobHistoryAddress, jobId);
     }
 
     /**
diff --git a/dolphinscheduler-common/src/main/resources/common.properties b/dolphinscheduler-common/src/main/resources/common.properties
index 5bb7c39289..b6580291bf 100644
--- a/dolphinscheduler-common/src/main/resources/common.properties
+++ b/dolphinscheduler-common/src/main/resources/common.properties
@@ -61,7 +61,9 @@ fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
 yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
 
 # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
-yarn.application.status.address=http://ds1:8088/ws/v1/cluster/apps/%s
+yarn.application.status.address=http://ds1:%s/ws/v1/cluster/apps/%s
+# if the resourcemanager port is customized, replace 8088 with the actual port; otherwise keep the default value
+resource.manager.httpaddress.port=8088
 
 # system env path
 #dolphinscheduler.env.path=env/dolphinscheduler_env.sh
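
Note that Constants.java and HadoopUtils.java above read a new yarn.job.history.status.address property, while the common.properties hunk shown here only adds resource.manager.httpaddress.port. If the rest of the change does not add that key elsewhere, a deployment would also need an entry along the following lines; the ds1 hostname and the 19888 history-server port are illustrative assumptions, not part of this diff:

    # job history status url, %s is replaced with the job id (e.g. job_1587475402360_712719)
    yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s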
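
To illustrate how the reworked URL templates resolve, here is a minimal standalone sketch (not part of the patch): yarn.application.status.address now carries two %s placeholders, filled with resource.manager.httpaddress.port and the application id, while getJobHistoryUrl() rewrites the application id into a job id before formatting it into the job-history template. The concrete property values below are assumptions used only for demonstration.

    // Minimal sketch of the URL formatting introduced in this diff; host/port values are illustrative.
    public class YarnUrlFormatSketch {
        public static void main(String[] args) {
            // yarn.application.status.address now takes a port placeholder plus the application id
            String appAddress = "http://ds1:%s/ws/v1/cluster/apps/%s";
            String resourceManagerPort = "8088"; // resource.manager.httpaddress.port default
            String applicationId = "application_1587475402360_712719";
            System.out.println(String.format(appAddress, resourceManagerPort, applicationId));
            // -> http://ds1:8088/ws/v1/cluster/apps/application_1587475402360_712719

            // getJobHistoryUrl(): application_xxx -> job_xxx, formatted into the
            // yarn.job.history.status.address template (value below is an assumption)
            String jobHistoryAddress = "http://ds1:19888/ws/v1/history/mapreduce/jobs/%s";
            String jobId = applicationId.replace("application", "job");
            System.out.println(String.format(jobHistoryAddress, jobId));
            // -> http://ds1:19888/ws/v1/history/mapreduce/jobs/job_1587475402360_712719
        }
    }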