Commit by break60, 5 years ago
Parent commit: e87043a3dd
17 changed files:

 1. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactory.java (15 changes)
 2. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java (2 changes)
 3. dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java (9 changes)
 4. dolphinscheduler-alert/src/main/resources/alert.properties (3 changes)
 5. dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactoryTest.java (1 change)
 6. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java (60 changes)
 7. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HttpUtils.java (2 changes)
 8. dolphinscheduler-common/src/main/resources/common.properties (24 changes)
 9. dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java (156 changes)
10. dolphinscheduler-dao/src/main/resources/datasource.properties (3 changes)
11. dolphinscheduler-server/src/main/resources/config/install_config.conf (131 changes)
12. install.sh (398 changes)
13. pom.xml (3 changes)
14. script/dolphinscheduler-daemon.sh (6 changes)
15. script/scp-hosts.sh (16 changes)
16. script/start-all.sh (7 changes)
17. script/stop-all.sh (8 changes)

dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactory.java (15 changes)

@@ -17,9 +17,6 @@
 package org.apache.dolphinscheduler.alert.template;

 import org.apache.dolphinscheduler.alert.template.impl.DefaultHTMLTemplate;
-import org.apache.dolphinscheduler.alert.utils.Constants;
-import org.apache.dolphinscheduler.alert.utils.PropertyUtils;
-import org.apache.dolphinscheduler.common.utils.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -30,8 +27,6 @@ public class AlertTemplateFactory {

     private static final Logger logger = LoggerFactory.getLogger(AlertTemplateFactory.class);

-    private static final String alertTemplate = PropertyUtils.getString(Constants.ALERT_TEMPLATE);

     private AlertTemplateFactory(){}

     /**
@@ -39,16 +34,6 @@ public class AlertTemplateFactory {
      * @return a template, default is DefaultHTMLTemplate
      */
     public static AlertTemplate getMessageTemplate() {
-        if(StringUtils.isEmpty(alertTemplate)){
-            return new DefaultHTMLTemplate();
-        }
-        switch (alertTemplate){
-            case "html":
-                return new DefaultHTMLTemplate();
-            default:
-                throw new IllegalArgumentException(String.format("not support alert template: %s",alertTemplate));
-        }
+        return new DefaultHTMLTemplate();
     }
 }
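With the alert.template lookup gone, the factory is unconditional: no property read, no IllegalArgumentException path. A minimal caller sketch (only the factory call and types come from the diff above; assumes the dolphinscheduler-alert module is on the classpath):

import org.apache.dolphinscheduler.alert.template.AlertTemplate;
import org.apache.dolphinscheduler.alert.template.AlertTemplateFactory;

public class AlertTemplateFactoryUsage {
    public static void main(String[] args) {
        // every caller now receives the HTML template
        AlertTemplate template = AlertTemplateFactory.getMessageTemplate();
        System.out.println(template.getClass().getSimpleName()); // DefaultHTMLTemplate
    }
}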

dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/Constants.java (2 changes)

@@ -77,8 +77,6 @@ public class Constants {

     public static final int NUMBER_1000 = 1000;

-    public static final String ALERT_TEMPLATE = "alert.template";
-
     public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";

     public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";

dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/utils/MailUtils.java (9 changes)

@@ -260,9 +260,14 @@ public class MailUtils {
             part1.setContent(partContent, Constants.TEXT_HTML_CHARSET_UTF_8);
             // set attach file
             MimeBodyPart part2 = new MimeBodyPart();
-            // make excel file
-            ExcelUtils.genExcelFile(content,title, xlsFilePath);
             File file = new File(xlsFilePath + Constants.SINGLE_SLASH + title + Constants.EXCEL_SUFFIX_XLS);
+            if (!file.getParentFile().exists()) {
+                file.getParentFile().mkdirs();
+            }
+            // make excel file
+            ExcelUtils.genExcelFile(content,title,xlsFilePath);
             part2.attachFile(file);
             part2.setFileName(MimeUtility.encodeText(title + Constants.EXCEL_SUFFIX_XLS,Constants.UTF_8,"B"));
             // add components to collection
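The reordering matters: the attachment's parent directory is now created before ExcelUtils.genExcelFile writes into it, so a first run on a clean host no longer fails on a missing xls directory. The guard in isolation (paths are hypothetical; only java.io is used):

import java.io.File;

public class AttachmentDirGuard {
    public static void main(String[] args) {
        File file = new File("/tmp/xls/demo-title.xls"); // hypothetical xlsFilePath + title
        // same guard the patch adds ahead of genExcelFile: create missing parents first
        if (!file.getParentFile().exists()) {
            file.getParentFile().mkdirs();
        }
        System.out.println("can write attachment: " + file.getParentFile().isDirectory());
    }
}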

dolphinscheduler-alert/src/main/resources/alert.properties (3 changes)

@@ -18,9 +18,6 @@
 #alert type is EMAIL/SMS
 alert.type=EMAIL

-# alter msg template, default is html template
-#alert.template=html
-
 # mail server configuration
 mail.protocol=SMTP
 mail.server.host=xxx.xxx.com

dolphinscheduler-alert/src/test/java/org/apache/dolphinscheduler/alert/template/AlertTemplateFactoryTest.java (1 change)

@@ -47,7 +47,6 @@ public class AlertTemplateFactoryTest {
     public void testGetMessageTemplate(){

         PowerMockito.mockStatic(PropertyUtils.class);
-        when(PropertyUtils.getString(Constants.ALERT_TEMPLATE)).thenReturn("html");

         AlertTemplate defaultTemplate = AlertTemplateFactory.getMessageTemplate();

dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java (60 changes)

@@ -57,7 +57,8 @@ public class HadoopUtils implements Closeable {

     private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
     public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
+    public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
+    public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);

     private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
@@ -110,14 +111,15 @@ public class HadoopUtils implements Closeable {
         try {
             configuration = new Configuration();

-            String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
-            ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
+            String resourceStorageType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
+            ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);

-            if (resUploadType == ResUploadType.HDFS) {
-                if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false)) {
+            if (resUploadType == ResUploadType.HDFS){
+                if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false)){
                     System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
                             PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
-                    configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+                    configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
+                    hdfsUser = "";
                     UserGroupInformation.setConfiguration(configuration);
                     UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
                             PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
@@ -166,21 +168,6 @@ public class HadoopUtils implements Closeable {
             }

-            String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
-            String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
-            //not use resourcemanager
-            if (rmHaIds.contains(Constants.YARN_RESOURCEMANAGER_HA_XX)){
-                yarnEnabled = false;
-            } else if (!StringUtils.isEmpty(rmHaIds)) {
-                //resourcemanager HA enabled
-                appAddress = getAppAddress(appAddress, rmHaIds);
-                yarnEnabled = true;
-                logger.info("appAddress : {}", appAddress);
-            } else {
-                //single resourcemanager enabled
-                yarnEnabled = true;
-            }
-            configuration.set(Constants.YARN_APPLICATION_STATUS_ADDRESS, appAddress);
         } catch (Exception e) {
             logger.error(e.getMessage(), e);
         }
@@ -200,7 +187,29 @@ public class HadoopUtils implements Closeable {
      * @return url of application
      */
     public String getApplicationUrl(String applicationId) {
-        return String.format(configuration.get(Constants.YARN_APPLICATION_STATUS_ADDRESS), applicationId);
+        /**
+         * if rmHaIds contains xx, it signs not use resourcemanager
+         * otherwise:
+         *  if rmHaIds is empty, single resourcemanager enabled
+         *  if rmHaIds not empty: resourcemanager HA enabled
+         */
+        String appUrl = "";
+        //not use resourcemanager
+        if (rmHaIds.contains(Constants.YARN_RESOURCEMANAGER_HA_XX)){
+            yarnEnabled = false;
+            logger.warn("should not step here");
+        } else if (!StringUtils.isEmpty(rmHaIds)) {
+            //resourcemanager HA enabled
+            appUrl = getAppAddress(appAddress, rmHaIds);
+            yarnEnabled = true;
+            logger.info("application url : {}", appUrl);
+        } else {
+            //single resourcemanager enabled
+            yarnEnabled = true;
+        }
+        return String.format(appUrl, applicationId);
     }

     /**
@@ -484,13 +493,6 @@ public class HadoopUtils implements Closeable {
         return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
     }

-    /**
-     * get absolute path and name for file on hdfs
-     *
-     * @param tenantCode tenant code
-     * @param fileName file name
-     * @return get absolute path and name for file on hdfs
-     */
     /**
      * get hdfs file name
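The YARN address resolution now runs per call instead of once at init. With the shipped default yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx the contains() placeholder check fires and yarnEnabled stays false, which is exactly what the new isYarnEnabled test asserts. Note that, as committed, the single-resourcemanager branch never assigns appUrl, so the format string is empty there; the sketch below assumes the intent was to fall back to appAddress. A standalone distillation ("xx" stands in for Constants.YARN_RESOURCEMANAGER_HA_XX, and the HA lookup is reduced to using the template as-is):

public class YarnStatusAddressSketch {

    // mirrors the branching of the patched getApplicationUrl()
    static String resolve(String rmHaIds, String appAddress, String applicationId) {
        if (rmHaIds.contains("xx")) {
            return null;            // resourcemanager not used (default placeholder config)
        }
        // assumed fallback; the real HA path calls getAppAddress(appAddress, rmHaIds)
        String appUrl = appAddress;
        return String.format(appUrl, applicationId);
    }

    public static void main(String[] args) {
        System.out.println(resolve("", "http://ark1:8088/ws/v1/cluster/apps/%s",
                "application_1529051418016_0167"));
        // -> http://ark1:8088/ws/v1/cluster/apps/application_1529051418016_0167
        System.out.println(resolve("192.168.xx.xx,192.168.xx.xx",
                "http://ark1:8088/ws/v1/cluster/apps/%s", "any"));
        // -> null: yarn treated as disabled
    }
}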

dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HttpUtils.java (2 changes)

@@ -40,7 +40,7 @@ public class HttpUtils {
     /**
      * get http request content
      * @param url url
-     * @return http response
+     * @return http get request response content
      */
     public static String get(String url){
         CloseableHttpClient httpclient = HttpClients.createDefault();
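A one-line caller, for what the sharpened javadoc promises: get() hands back the response content itself as a String, not a response object (the URL is the sample resourcemanager host from common.properties, purely illustrative):

import org.apache.dolphinscheduler.common.utils.HttpUtils;

public class HttpUtilsUsage {
    public static void main(String[] args) {
        // returns the body of the GET response as plain text
        String body = HttpUtils.get("http://ark1:8088/ws/v1/cluster/info");
        System.out.println(body);
    }
}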

dolphinscheduler-common/src/main/resources/common.properties (24 changes)

@@ -19,22 +19,22 @@
 resource.storage.type=NONE

 # resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
-#resource.upload.path=/dolphinscheduler
+resource.upload.path=/dolphinscheduler

 # user data local directory path, please make sure the directory exists and have read write permissions
 #data.basedir.path=/tmp/dolphinscheduler

 # whether kerberos starts
-#hadoop.security.authentication.startup.state=false
+hadoop.security.authentication.startup.state=false

 # java.security.krb5.conf path
-#java.security.krb5.conf.path=/opt/krb5.conf
+java.security.krb5.conf.path=/opt/krb5.conf

-# loginUserFromKeytab user
-#login.user.keytab.username=hdfs-mycluster@ESZ.COM
+# login user from keytab username
+login.user.keytab.username=hdfs-mycluster@ESZ.COM

 # loginUserFromKeytab path
-#login.user.keytab.path=/opt/hdfs.headless.keytab
+login.user.keytab.path=/opt/hdfs.headless.keytab

 #resource.view.suffixs
 #resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
@@ -46,21 +46,21 @@ hdfs.root.user=hdfs
 fs.defaultFS=hdfs://mycluster:8020

 # if resource.storage.type=S3,s3 endpoint
-#fs.s3a.endpoint=http://192.168.199.91:9010
+fs.s3a.endpoint=http://192.168.199.91:9010

 # if resource.storage.type=S3,s3 access key
-#fs.s3a.access.key=A3DXS30FO22544RE
+fs.s3a.access.key=A3DXS30FO22544RE

 # if resource.storage.type=S3,s3 secret key
-#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
+fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK

-# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty TODO
+# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
 yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx

-# If resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
+# if resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
 yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s

-# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions, TODO
+# system env path
 #dolphinscheduler.env.path=env/dolphinscheduler_env.sh

 kerberos.expire.time=7
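Since several formerly commented keys are now live defaults, a quick way to confirm what actually resolves at runtime is to load the file off the classpath with plain java.util.Properties (a sanity-check sketch, presumably similar to what PropertyUtils does internally; no DolphinScheduler code needed):

import java.io.InputStream;
import java.util.Properties;

public class CommonPropertiesCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        try (InputStream in = CommonPropertiesCheck.class
                .getResourceAsStream("/common.properties")) {
            props.load(in);
        }
        System.out.println(props.getProperty("resource.upload.path"));                      // /dolphinscheduler
        System.out.println(props.getProperty("hadoop.security.authentication.startup.state")); // false
        System.out.println(props.getProperty("yarn.resourcemanager.ha.rm.ids"));            // placeholder ips
    }
}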

dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/HadoopUtilsTest.java (156 changes)

@@ -17,88 +17,186 @@
 package org.apache.dolphinscheduler.common.utils;

 import org.apache.dolphinscheduler.common.enums.ResourceType;
-import org.junit.Ignore;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;

-@Ignore
+@RunWith(MockitoJUnitRunner.class)
+//todo there is no hadoop environment
 public class HadoopUtilsTest {

     private static final Logger logger = LoggerFactory.getLogger(HadoopUtilsTest.class);
+    private HadoopUtils hadoopUtils = HadoopUtils.getInstance();

     @Test
     public void getActiveRMTest() {
-        logger.info(HadoopUtils.getAppAddress("http://ark1:8088/ws/v1/cluster/apps/%s","192.168.xx.xx,192.168.xx.xx"));
+        try{
+            hadoopUtils.getAppAddress("http://ark1:8088/ws/v1/cluster/apps/%s","192.168.xx.xx,192.168.xx.xx");
+        } catch (Exception e) {
+            logger.error(e.getMessage(),e);
+        }
     }

     @Test
-    public void getApplicationStatusAddressTest(){
-        logger.info(HadoopUtils.getInstance().getApplicationUrl("application_1548381297012_0030"));
+    public void rename() {
+        boolean result = false;
+        try {
+            result = hadoopUtils.rename("/dolphinscheduler/hdfs1","/dolphinscheduler/hdfs2");
+        } catch (Exception e) {
+            logger.error(e.getMessage(),e);
+        }
+        Assert.assertEquals(false, result);
     }

     @Test
-    public void test() throws IOException {
-        HadoopUtils.getInstance().copyLocalToHdfs("/root/teamviewer_13.1.8286.x86_64.rpm", "/journey", true, true);
+    public void getConfiguration(){
+        Configuration conf = hadoopUtils.getConfiguration();
     }

     @Test
-    public void readFileTest(){
-        try {
-            byte[] bytes = HadoopUtils.getInstance().catFile("/dolphinscheduler/hdfs/resources/35435.sh");
-            logger.info(new String(bytes));
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-        }
-    }
+    public void mkdir() {
+        boolean result = false;
+        try {
+            result = hadoopUtils.mkdir("/dolphinscheduler/hdfs");
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+        Assert.assertEquals(false, result);
+    }

+    @Test
+    public void delete() {
+        boolean result = false;
+        try {
+            result = hadoopUtils.delete("/dolphinscheduler/hdfs",true);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+        Assert.assertEquals(false, result);
+    }

+    @Test
+    public void exists() {
+        boolean result = false;
+        try {
+            result = hadoopUtils.exists("/dolphinscheduler/hdfs");
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+        Assert.assertEquals(false, result);
+    }

+    @Test
+    public void getHdfsDataBasePath() {
+        String result = hadoopUtils.getHdfsDataBasePath();
+        Assert.assertEquals("/dolphinscheduler", result);
+    }

+    @Test
+    public void getHdfsResDir() {
+        String result = hadoopUtils.getHdfsResDir("11000");
+        Assert.assertEquals("/dolphinscheduler/11000/resources", result);
+    }

     @Test
-    public void testCapacity(){
-    }
+    public void getHdfsUserDir() {
+        String result = hadoopUtils.getHdfsUserDir("11000",1000);
+        Assert.assertEquals("/dolphinscheduler/11000/home/1000", result);
+    }

+    @Test
+    public void getHdfsUdfDir() {
+        String result = hadoopUtils.getHdfsUdfDir("11000");
+        Assert.assertEquals("/dolphinscheduler/11000/udfs", result);
+    }

     @Test
-    public void testMove(){
-        HadoopUtils instance = HadoopUtils.getInstance();
-        try {
-            instance.copy("/opt/apptest/test.dat","/opt/apptest/test.dat.back",true,true);
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-        }
-    }
+    public void getHdfsFileName() {
+        String result = hadoopUtils.getHdfsFileName(ResourceType.FILE,"11000","aa.txt");
+        Assert.assertEquals("/dolphinscheduler/11000/resources/aa.txt", result);
+    }

+    @Test
+    public void isYarnEnabled() {
+        boolean result = hadoopUtils.isYarnEnabled();
+        Assert.assertEquals(false, result);
+    }

+    @Test
+    public void test() {
+        try {
+            hadoopUtils.copyLocalToHdfs("/root/teamviewer_13.1.8286.x86_64.rpm", "/journey", true, true);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }

+    @Test
+    public void readFileTest(){
+        try {
+            byte[] bytes = hadoopUtils.catFile("/dolphinscheduler/hdfs/resources/35435.sh");
+            logger.info(new String(bytes));
+        } catch (Exception e) {
+            logger.error(e.getMessage(),e);
+        }
+    }

+    @Test
+    public void testMove(){
+        try {
+            hadoopUtils.copy("/opt/apptest/test.dat","/opt/apptest/test.dat.back",true,true);
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }

     @Test
     public void getApplicationStatus() {
-        logger.info(HadoopUtils.getInstance().getApplicationStatus("application_1542010131334_0029").toString());
+        try {
+            logger.info(hadoopUtils.getApplicationStatus("application_1542010131334_0029").toString());
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
     }

     @Test
     public void getApplicationUrl(){
-        String application_1516778421218_0042 = HadoopUtils.getInstance().getApplicationUrl("application_1529051418016_0167");
+        String application_1516778421218_0042 = hadoopUtils.getApplicationUrl("application_1529051418016_0167");
         logger.info(application_1516778421218_0042);
     }

     @Test
-    public void catFileTest()throws Exception{
-        List<String> stringList = HadoopUtils.getInstance().catFile("/dolphinscheduler/hdfs/resources/WCSparkPython.py", 0, 1000);
-        logger.info(String.join(",",stringList));
-    }
+    public void catFileWithLimitTest() {
+        List<String> stringList = new ArrayList<>();
+        try {
+            stringList = hadoopUtils.catFile("/dolphinscheduler/hdfs/resources/WCSparkPython.py", 0, 1000);
+            logger.info(String.join(",",stringList));
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+    }

     @Test
-    public void getHdfsFileNameTest(){
-        logger.info(HadoopUtils.getHdfsFileName(ResourceType.FILE,"test","/test"));
-    }
-
-    @Test
-    public void getHdfsResourceFileNameTest(){
-        logger.info(HadoopUtils.getHdfsResourceFileName("test","/test"));
-    }
-
-    @Test
-    public void getHdfsUdfFileNameTest(){
-        logger.info(HadoopUtils.getHdfsUdfFileName("test","/test.jar"));
-    }
+    public void catFileTest() {
+        byte[] content = new byte[0];
+        try {
+            content = hadoopUtils.catFile("/dolphinscheduler/hdfs/resources/WCSparkPython.py");
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+        logger.info(Arrays.toString(content));
+    }
 }

dolphinscheduler-dao/src/main/resources/datasource.properties (3 changes)

@@ -25,9 +25,6 @@ spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler
 spring.datasource.username=test
 spring.datasource.password=test

-## base spring data source configuration todo need to remove
-#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
-
 # connection configuration
 #spring.datasource.initialSize=5
 # min connection number

dolphinscheduler-server/src/main/resources/config/install_config.conf (131 changes)

@@ -15,11 +15,126 @@
 # limitations under the License.
 #

-installPath=/data1_1T/dolphinscheduler
-deployUser=dolphinscheduler
-ips=ark0,ark1,ark2,ark3,ark4
-sshPort=22
-masters=ark0,ark1
-workers=ark2,ark3,ark4
-alertServer=ark3
-apiServers=ark1
+# NOTICE : If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[`
+
+# postgresql or mysql
+dbtype="mysql"
+
+# db config
+# db address and port
+dbhost="192.168.xx.xx:3306"
+
+# db username
+username="xx"
+
+# db passwprd
+# NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[`
+password="xx"
+
+# zk cluster
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd)
+installPath="/data1_1T/dolphinscheduler"
+
+# deployment user
+# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
+deployUser="dolphinscheduler"
+
+# alert config
+# mail server host
+mailServerHost="smtp.exmail.qq.com"
+
+# mail server port
+# note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.
+mailServerPort="25"
+
+# sender
+mailSender="xxxxxxxxxx"
+
+# user
+mailUser="xxxxxxxxxx"
+
+# sender password
+# note: The mail.passwd is email service authorization code, not the email login password.
+mailPassword="xxxxxxxxxx"
+
+# TLS mail protocol support
+starttlsEnable="false"
+
+sslTrust="xxxxxxxxxx"
+
+# SSL mail protocol support
+# note: The SSL protocol is enabled by default.
+# only one of TLS and SSL can be in the true state.
+sslEnable="true"
+
+# resource storage type:HDFS,S3,NONE
+resourceStorageType="NONE"
+
+# if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
+# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
+# Note,s3 be sure to create the root directory /dolphinscheduler
+defaultFS="hdfs://mycluster:8020"
+
+# if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
+s3Endpoint="http://192.168.xx.xx:9010"
+s3AccessKey="xxxxxxxxxx"
+s3SecretKey="xxxxxxxxxx"
+
+# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+
+# if resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
+singleYarnIp="ark1"
+
+# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。/dolphinscheduler is recommended
+resourceUploadPath="/dolphinscheduler"
+
+# who have permissions to create directory under HDFS/S3 root path
+# Note: if kerberos is enabled, please config hdfsRootUser=
+hdfsRootUser="hdfs"
+
+# kerberos config
+# whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore
+kerberosStartUp="false"
+# kdc krb5 config file path
+krb5ConfPath="$installPath/conf/krb5.conf"
+# keytab username
+keytabUserName="hdfs-mycluster@ESZ.COM"
+# username keytab path
+keytabPath="$installPath/conf/hdfs.headless.keytab"
+
+# api server port
+apiServerPort="12345"
+
+# install hosts
+# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
+ips="ark0,ark1,ark2,ark3,ark4"
+
+# ssh port, default 22
+# Note: if ssh port is not default, modify here
+sshPort="22"
+
+# run master machine
+# Note: list of hosts hostname for deploying master
+masters="ark0,ark1"
+
+# run worker machine
+# note: list of machine hostnames for deploying workers
+workers="ark2,ark3,ark4"
+
+# run alert machine
+# note: list of machine hostnames for deploying alert server
+alertServer="ark3"
+
+# run api machine
+# note: list of machine hostnames for deploying api server
+apiServers="ark1"
+
+# whether to start monitoring self-starting scripts
+monitorServerState="false"

install.sh (398 changes)

@@ -19,309 +19,25 @@
 workDir=`dirname $0`
 workDir=`cd ${workDir};pwd`

-#To be compatible with MacOS and Linux
+source ${workDir}/conf/config/install_config.conf
+
+# 1.replace file
+echo "1.replace file"
+
 txt=""
 if [[ "$OSTYPE" == "darwin"* ]]; then
     # Mac OSX
     txt="''"
-elif [[ "$OSTYPE" == "linux-gnu" ]]; then
-    # linux
-    txt=""
-elif [[ "$OSTYPE" == "cygwin" ]]; then
-    # POSIX compatibility layer and Linux environment emulation for Windows
-    echo "DolphinScheduler not support Windows operating system"
-    exit 1
-elif [[ "$OSTYPE" == "msys" ]]; then
-    # Lightweight shell and GNU utilities compiled for Windows (part of MinGW)
-    echo "DolphinScheduler not support Windows operating system"
-    exit 1
-elif [[ "$OSTYPE" == "win32" ]]; then
-    echo "DolphinScheduler not support Windows operating system"
-    exit 1
-elif [[ "$OSTYPE" == "freebsd"* ]]; then
-    # ...
-    txt=""
-else
-    # Unknown.
-    echo "Operating system unknown, please tell us(submit issue) for better service"
-    exit 1
 fi

-source ${workDir}/conf/config/install_config.conf
-
-# for example postgresql or mysql ...
-dbtype="postgresql"
-
-# db config
-# db address and port
-dbhost="192.168.xx.xx:5432"
-
-# db name
-dbname="dolphinscheduler"
-
-# db username
-username="xx"
-
-# db passwprd
-# Note: if there are special characters, please use the \ transfer character to transfer
-passowrd="xx"
-
-# conf/config/install_config.conf config
-# Note: the installation path is not the same as the current path (pwd)
-installPath="/data1_1T/dolphinscheduler"
-
-# deployment user
-# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
-deployUser="dolphinscheduler"
-
-# zk cluster
-zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
-
-# install hosts
-# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
-ips="ark0,ark1,ark2,ark3,ark4"
-
-# ssh port, default 22
-# Note: if ssh port is not default, modify here
-sshPort=22
-
-# run master machine
-# Note: list of hosts hostname for deploying master
-masters="ark0,ark1"
-
-# run worker machine
-# note: list of machine hostnames for deploying workers
-workers="ark2,ark3,ark4"
-
-# run alert machine
-# note: list of machine hostnames for deploying alert server
-alertServer="ark3"
-
-# run api machine
-# note: list of machine hostnames for deploying api server
-apiServers="ark1"
-
-# alert config
-# mail protocol
-mailProtocol="SMTP"
-
-# mail server host
-mailServerHost="smtp.exmail.qq.com"
-
-# mail server port
-# note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.
-mailServerPort="25"
-
-# sender
-mailSender="xxxxxxxxxx"
-
-# user
-mailUser="xxxxxxxxxx"
-
-# sender password
-# note: The mail.passwd is email service authorization code, not the email login password.
-mailPassword="xxxxxxxxxx"
-
-# TLS mail protocol support
-starttlsEnable="false"
-
-sslTrust="xxxxxxxxxx"
-
-# SSL mail protocol support
-# note: The SSL protocol is enabled by default.
-# only one of TLS and SSL can be in the true state.
-sslEnable="true"
-
-# download excel path
-xlsFilePath="/tmp/xls"
-
-# Enterprise WeChat Enterprise ID Configuration
-enterpriseWechatCorpId="xxxxxxxxxx"
-
-# Enterprise WeChat application Secret configuration
-enterpriseWechatSecret="xxxxxxxxxx"
-
-# Enterprise WeChat Application AgentId Configuration
-enterpriseWechatAgentId="xxxxxxxxxx"
-
-# Enterprise WeChat user configuration, multiple users to , split
-enterpriseWechatUsers="xxxxx,xxxxx"
-
-# whether to start monitoring self-starting scripts
-monitorServerState="false"
-
-# resource Center upload and select storage method:HDFS,S3,NONE
-resUploadStartupType="NONE"
-
-# if resUploadStartupType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
-# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
-# Note,s3 be sure to create the root directory /dolphinscheduler
-defaultFS="hdfs://mycluster:8020"
-
-# if S3 is configured, the following configuration is required.
-s3Endpoint="http://192.168.xx.xx:9010"
-s3AccessKey="xxxxxxxxxx"
-s3SecretKey="xxxxxxxxxx"
-
-# resourcemanager HA configuration, if it is a single resourcemanager, here is yarnHaIps=""
-yarnHaIps="192.168.xx.xx,192.168.xx.xx"
-
-# if it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine.
-singleYarnIp="ark1"
-
-# hdfs root path, the owner of the root path must be the deployment user.
-# versions prior to 1.1.0 do not automatically create the hdfs root directory, you need to create it yourself.
-hdfsPath="/dolphinscheduler"
-
-# have users who create directory permissions under hdfs root path /
-# Note: if kerberos is enabled, hdfsRootUser="" can be used directly.
-hdfsRootUser="hdfs"
-
-# common config
-# Program root path
-programPath="/tmp/dolphinscheduler"
-
-# download path
-downloadPath="/tmp/dolphinscheduler/download"
-
-# task execute path
-execPath="/tmp/dolphinscheduler/exec"
-
-# SHELL environmental variable path
-shellEnvPath="$installPath/conf/env/dolphinscheduler_env.sh"
-
-# suffix of the resource file
-resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
-
-# development status, if true, for the SHELL script, you can view the encapsulated SHELL script in the execPath directory.
-# If it is false, execute the direct delete
-devState="true"
-
-# kerberos config
-# kerberos whether to start
-kerberosStartUp="false"
-
-# kdc krb5 config file path
-krb5ConfPath="$installPath/conf/krb5.conf"
-
-# keytab username
-keytabUserName="hdfs-mycluster@ESZ.COM"
-
-# username keytab path
-keytabPath="$installPath/conf/hdfs.headless.keytab"
-
-# zk config
-# zk root directory
-zkRoot="/dolphinscheduler"
-
-# zk session timeout
-zkSessionTimeout="300"
-
-# zk connection timeout
-zkConnectionTimeout="300"
-
-# zk retry interval
-zkRetryMaxSleep="100"
-
-# zk retry maximum number of times
-zkRetryMaxtime="5"
-
-# master config
-# master execution thread maximum number, maximum parallelism of process instance
-masterExecThreads="100"
-
-# the maximum number of master task execution threads, the maximum degree of parallelism for each process instance
-masterExecTaskNum="20"
-
-# master heartbeat interval
-masterHeartbeatInterval="10"
-
-# master task submission retries
-masterTaskCommitRetryTimes="5"
-
-# master task submission retry interval
-masterTaskCommitInterval="1000"
-
-# master maximum cpu average load, used to determine whether the master has execution capability
-masterMaxCpuLoadAvg="100"
-
-# master reserve memory to determine if the master has execution capability
-masterReservedMemory="0.1"
-
-# worker config
-# worker execution thread
-workerExecThreads="100"
-
-# worker heartbeat interval
-workerHeartbeatInterval="10"
-
-# worker number of fetch tasks
-workerFetchTaskNum="3"
-
-# worker reserve memory to determine if the master has execution capability
-workerReservedMemory="0.1"
-
-# api config
-# api server port
-apiServerPort="12345"
-
-# api session timeout
-apiServerSessionTimeout="7200"
-
-# api server context path
-apiServerContextPath="/dolphinscheduler/"
-
-# spring max file size
-springMaxFileSize="1024MB"
-
-# spring max request size
-springMaxRequestSize="1024MB"
-
-# api max http post size
-apiMaxHttpPostSize="5000000"
-
-# 1,replace file
-echo "1,replace file"
-
-if [ $dbtype == "mysql" ];then
-    sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${passowrd}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=com.mysql.jdbc.Driver#g" conf/application.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${username}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${passowrd}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.driver.*#org.quartz.dataSource.myDs.driver=com.mysql.jdbc.Driver#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.jobStore.driverDelegateClass.*#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.StdJDBCDelegate#g" conf/quartz.properties
-fi
+datasourceDriverClassname="com.mysql.jdbc.Driver"

 if [ $dbtype == "postgresql" ];then
-    sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:postgresql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${passowrd}#g" conf/application.properties
-    sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=org.postgresql.Driver#g" conf/application.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:postgresql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${username}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${passowrd}#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.dataSource.myDs.driver.*#org.quartz.dataSource.myDs.driver=org.postgresql.Driver#g" conf/quartz.properties
-    sed -i ${txt} "s#org.quartz.jobStore.driverDelegateClass.*#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.PostgreSQLDelegate#g" conf/quartz.properties
+    datasourceDriverClassname="org.postgresql.Driver"
 fi
+sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=${datasourceDriverClassname}#g" conf/datasource.properties
+sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:${dbtype}://${dbhost}/dolphinscheduler?characterEncoding=UTF-8#g" conf/datasource.properties
+sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/datasource.properties
+sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${password}#g" conf/datasource.properties

-sed -i ${txt} "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/application.properties
-sed -i ${txt} "s#master.exec.task.num.*#master.exec.task.num=${masterExecTaskNum}#g" conf/application.properties
-sed -i ${txt} "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/application.properties
-sed -i ${txt} "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/application.properties
-sed -i ${txt} "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/application.properties
-sed -i ${txt} "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/application.properties
-sed -i ${txt} "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/application.properties
-sed -i ${txt} "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/application.properties
-sed -i ${txt} "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/application.properties
-sed -i ${txt} "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/application.properties

 sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${defaultFS}#g" conf/common.properties
 sed -i ${txt} "s#fs.s3a.endpoint.*#fs.s3a.endpoint=${s3Endpoint}#g" conf/common.properties
@@ -329,37 +45,18 @@ sed -i ${txt} "s#fs.s3a.access.key.*#fs.s3a.access.key=${s3AccessKey}#g" conf/co
 sed -i ${txt} "s#fs.s3a.secret.key.*#fs.s3a.secret.key=${s3SecretKey}#g" conf/common.properties
 sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common.properties
 sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common.properties
-sed -i ${txt} "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common.properties
-sed -i ${txt} "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common.properties
-sed -i ${txt} "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common.properties
 sed -i ${txt} "s#hdfs.root.user.*#hdfs.root.user=${hdfsRootUser}#g" conf/common.properties
-sed -i ${txt} "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common.properties
+sed -i ${txt} "s#resource.upload.path.*#resource.upload.path=${resourceUploadPath}#g" conf/common.properties
-sed -i ${txt} "s#res.upload.startup.type.*#res.upload.startup.type=${resUploadStartupType}#g" conf/common.properties
+sed -i ${txt} "s#resource.storage.type.*#resource.storage.type=${resourceStorageType}#g" conf/common.properties
-sed -i ${txt} "s#dolphinscheduler.env.path.*#dolphinscheduler.env.path=${shellEnvPath}#g" conf/common.properties
-sed -i ${txt} "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common.properties
-sed -i ${txt} "s#development.state.*#development.state=${devState}#g" conf/common.properties
 sed -i ${txt} "s#hadoop.security.authentication.startup.state.*#hadoop.security.authentication.startup.state=${kerberosStartUp}#g" conf/common.properties
 sed -i ${txt} "s#java.security.krb5.conf.path.*#java.security.krb5.conf.path=${krb5ConfPath}#g" conf/common.properties
 sed -i ${txt} "s#login.user.keytab.username.*#login.user.keytab.username=${keytabUserName}#g" conf/common.properties
 sed -i ${txt} "s#login.user.keytab.path.*#login.user.keytab.path=${keytabPath}#g" conf/common.properties
 sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.dolphinscheduler.root.*#zookeeper.dolphinscheduler.root=${zkRoot}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.retry.max.sleep.*#zookeeper.retry.max.sleep=${zkRetryMaxSleep}#g" conf/common.properties
-sed -i ${txt} "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/common.properties

 sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application-api.properties
-sed -i ${txt} "s#server.servlet.session.timeout.*#server.servlet.session.timeout=${apiServerSessionTimeout}#g" conf/application-api.properties
-sed -i ${txt} "s#server.servlet.context-path.*#server.servlet.context-path=${apiServerContextPath}#g" conf/application-api.properties
-sed -i ${txt} "s#spring.servlet.multipart.max-file-size.*#spring.servlet.multipart.max-file-size=${springMaxFileSize}#g" conf/application-api.properties
-sed -i ${txt} "s#spring.servlet.multipart.max-request-size.*#spring.servlet.multipart.max-request-size=${springMaxRequestSize}#g" conf/application-api.properties
-sed -i ${txt} "s#server.jetty.max-http-post-size.*#server.jetty.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application-api.properties

-sed -i ${txt} "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
 sed -i ${txt} "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
 sed -i ${txt} "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
 sed -i ${txt} "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
@@ -368,85 +65,38 @@ sed -i ${txt} "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.propert
 sed -i ${txt} "s#mail.smtp.starttls.enable.*#mail.smtp.starttls.enable=${starttlsEnable}#g" conf/alert.properties
 sed -i ${txt} "s#mail.smtp.ssl.trust.*#mail.smtp.ssl.trust=${sslTrust}#g" conf/alert.properties
 sed -i ${txt} "s#mail.smtp.ssl.enable.*#mail.smtp.ssl.enable=${sslEnable}#g" conf/alert.properties
-sed -i ${txt} "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.corp.id.*#enterprise.wechat.corp.id=${enterpriseWechatCorpId}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.secret.*#enterprise.wechat.secret=${enterpriseWechatSecret}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.agent.id.*#enterprise.wechat.agent.id=${enterpriseWechatAgentId}#g" conf/alert.properties
-sed -i ${txt} "s#enterprise.wechat.users.*#enterprise.wechat.users=${enterpriseWechatUsers}#g" conf/alert.properties

-sed -i ${txt} "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
-sed -i ${txt} "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
-sed -i ${txt} "s#ips.*#ips=${ips}#g" conf/config/install_config.conf
-sed -i ${txt} "s#sshPort.*#sshPort=${sshPort}#g" conf/config/install_config.conf
-sed -i ${txt} "s#masters.*#masters=${masters}#g" conf/config/install_config.conf
-sed -i ${txt} "s#workers.*#workers=${workers}#g" conf/config/install_config.conf
-sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/install_config.conf
-sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/install_config.conf
-sed -i ${txt} "s#sshPort.*#sshPort=${sshPort}#g" conf/config/install_config.conf

-# 2,create directory
-echo "2,create directory"
+# 2.create directory
+echo "2.create directory"

 if [ ! -d $installPath ];then
   sudo mkdir -p $installPath
   sudo chown -R $deployUser:$deployUser $installPath
 fi

-hostsArr=(${ips//,/ })
-for host in ${hostsArr[@]}
-do
-
-# create if programPath does not exist
-if ! ssh -p $sshPort $host test -e $programPath; then
-  ssh -p $sshPort $host "sudo mkdir -p $programPath;sudo chown -R $deployUser:$deployUser $programPath"
-fi
-
-# create if downloadPath does not exist
-if ! ssh -p $sshPort $host test -e $downloadPath; then
-  ssh -p $sshPort $host "sudo mkdir -p $downloadPath;sudo chown -R $deployUser:$deployUser $downloadPath"
-fi
-
-# create if execPath does not exist
-if ! ssh -p $sshPort $host test -e $execPath; then
-  ssh -p $sshPort $host "sudo mkdir -p $execPath; sudo chown -R $deployUser:$deployUser $execPath"
-fi
-
-# create if xlsFilePath does not exist
-if ! ssh -p $sshPort $host test -e $xlsFilePath; then
-  ssh -p $sshPort $host "sudo mkdir -p $xlsFilePath; sudo chown -R $deployUser:$deployUser $xlsFilePath"
-fi
-
-done

-# 3,scp resources
-echo "3,scp resources"
+# 3.scp resources
+echo "3.scp resources"

 sh ${workDir}/script/scp-hosts.sh
 if [ $? -eq 0 ]
 then
   echo 'scp copy completed'
 else
   echo 'scp copy failed to exit'
-  exit -1
+  exit 1
 fi

-# 4,stop server
-echo "4,stop server"
+# 4.stop server
+echo "4.stop server"
 sh ${workDir}/script/stop-all.sh

-# 5,delete zk node
-echo "5,delete zk node"
+# 5.delete zk node
+echo "5.delete zk node"
 sh ${workDir}/script/remove-zk-node.sh $zkRoot

-# 6,startup
-echo "6,startup"
+# 6.startup
+echo "6.startup"
 sh ${workDir}/script/start-all.sh

pom.xml (3 changes)

@@ -760,8 +760,9 @@
                     <include>**/common/utils/StringTest.java</include>
                     <include>**/common/utils/StringUtilsTest.java</include>
                     <include>**/common/utils/TaskParametersUtilsTest.java</include>
+                    <include>**/common/utils/HadoopUtilsTest.java</include>
+                    <include>**/common/utils/HttpUtilsTest.java</include>
                     <include>**/common/ConstantsTest.java</include>
-                    <include>**/common/utils/HadoopUtils.java</include>
                     <include>**/dao/mapper/AccessTokenMapperTest.java</include>
                     <include>**/dao/mapper/AlertGroupMapperTest.java</include>
                     <include>**/dao/mapper/CommandMapperTest.java</include>

script/dolphinscheduler-daemon.sh (6 changes)

@@ -35,6 +35,8 @@ BIN_DIR=`dirname $0`
 BIN_DIR=`cd "$BIN_DIR"; pwd`
 DOLPHINSCHEDULER_HOME=$BIN_DIR/..

+source /etc/profile
+
 export JAVA_HOME=$JAVA_HOME
 #export JAVA_HOME=/opt/soft/jdk
 export HOSTNAME=`hostname`
@@ -90,8 +92,8 @@ case $startStop in
     exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath $DOLPHINSCHEDULER_CONF_DIR:$DOLPHINSCHEDULER_LIB_JARS $CLASS"

-    echo "nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 > /dev/null &"
-    nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 > /dev/null &
+    echo "nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 &"
+    nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 &
     echo $! > $pid
     ;;

script/scp-hosts.sh (16 changes)

@@ -28,12 +28,14 @@ do
     ssh -p $sshPort $host "sudo mkdir -p $installPath; sudo chown -R $deployUser:$deployUser $installPath"
   fi

+  echo "scp dirs to $host/$installPath starting"
   ssh -p $sshPort $host "cd $installPath/; rm -rf bin/ conf/ lib/ script/ sql/ ui/"
-  scp -P $sshPort -r $workDir/../bin $host:$installPath
-  scp -P $sshPort -r $workDir/../conf $host:$installPath
-  scp -P $sshPort -r $workDir/../lib $host:$installPath
-  scp -P $sshPort -r $workDir/../script $host:$installPath
-  scp -P $sshPort -r $workDir/../sql $host:$installPath
-  scp -P $sshPort -r $workDir/../ui $host:$installPath
-  scp -P $sshPort $workDir/../install.sh $host:$installPath
+  for dsDir in bin conf lib script sql ui install.sh
+  do
+    echo "start to scp $dsDir to $host/$installPath"
+    scp -P $sshPort -r $workDir/../$dsDir $host:$installPath
+  done
+  echo "scp dirs to $host/$installPath complete"
 done

script/start-all.sh (7 changes)

@@ -23,7 +23,7 @@ source $workDir/../conf/config/install_config.conf
 mastersHost=(${masters//,/ })
 for master in ${mastersHost[@]}
 do
-  echo $master
+  echo "$master master server is starting"
   ssh -p $sshPort $master "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start master-server;"
 done
@@ -31,7 +31,7 @@ done
 workersHost=(${workers//,/ })
 for worker in ${workersHost[@]}
 do
-  echo $worker
+  echo "$worker worker server is starting"
   ssh -p $sshPort $worker "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start worker-server;"
   ssh -p $sshPort $worker "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start logger-server;"
@@ -42,8 +42,7 @@ ssh -p $sshPort $alertServer "cd $installPath/; sh bin/dolphinscheduler-daemon.
 apiServersHost=(${apiServers//,/ })
 for apiServer in ${apiServersHost[@]}
 do
-  echo $apiServer
+  echo "$apiServer worker server is starting"
   ssh -p $sshPort $apiServer "cd $installPath/; sh bin/dolphinscheduler-daemon.sh start api-server;"
 done

script/stop-all.sh (8 changes)

@@ -24,7 +24,7 @@ source $workDir/../conf/config/install_config.conf
 mastersHost=(${masters//,/ })
 for master in ${mastersHost[@]}
 do
-  echo $master
+  echo "$master master server is stopping"
   ssh -p $sshPort $master "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop master-server;"
 done
@@ -32,8 +32,7 @@ done
 workersHost=(${workers//,/ })
 for worker in ${workersHost[@]}
 do
-  echo $worker
+  echo "$worker worker server is stopping"
   ssh -p $sshPort $worker "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop worker-server;"
   ssh -p $sshPort $worker "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop logger-server;"
 done
@@ -43,8 +42,7 @@ ssh -p $sshPort $alertServer "cd $installPath/; sh bin/dolphinscheduler-daemon.
 apiServersHost=(${apiServers//,/ })
 for apiServer in ${apiServersHost[@]}
 do
-  echo $apiServer
+  echo "$apiServer worker server is stopping"
   ssh -p $sshPort $apiServer "cd $installPath/; sh bin/dolphinscheduler-daemon.sh stop api-server;"
 done
