<!-- 分布式调度框架 (distributed scheduling framework) -->
<!DOCTYPE HTML>
<html lang="zh-hans" >
<head>
<meta charset="UTF-8">
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
<title>部署文档 · 调度系统-EasyScheduler</title>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="description" content="">
<meta name="generator" content="GitBook 3.2.3">
<meta name="author" content="YIGUAN">
<link rel="stylesheet" href="gitbook/style.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-expandable-chapters/expandable-chapters.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-insert-logo-link/plugin.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-highlight/website.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-search/search.css">
<link rel="stylesheet" href="gitbook/gitbook-plugin-fontsettings/website.css">
<link rel="stylesheet" href="styles/website.css">
<meta name="HandheldFriendly" content="true"/>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<link rel="apple-touch-icon-precomposed" sizes="152x152" href="gitbook/images/apple-touch-icon-precomposed-152.png">
<link rel="shortcut icon" href="gitbook/images/favicon.ico" type="image/x-icon">
<link rel="next" href="task-plugin-development.html" />
<link rel="prev" href="architecture-design.html" />
</head>
<body>
<div class="book">
<div class="book-summary">
<div id="book-search-input" role="search">
<input type="text" placeholder="输入并搜索" />
</div>
<nav role="navigation">
<ul class="summary">
<li class="chapter " data-level="1.1" data-path="./">
<a href="./">
EasyScheduler简介
</a>
</li>
<li class="chapter " data-level="1.2" >
<span>
前端文档
</span>
<ul class="articles">
<li class="chapter " data-level="1.2.1" data-path="frontend-deploy.html">
<a href="frontend-deploy.html#前端项目环境构建及编译">
环境搭建
</a>
</li>
<li class="chapter " data-level="1.2.2" data-path="frontend-deploy.html">
<a href="frontend-deploy.html#安装及配置">
安装及配置
</a>
</li>
<li class="chapter " data-level="1.2.3" data-path="frontend-deploy.html">
<a href="frontend-deploy.html#项目生产环境配置">
项目生产环境Nginx配置
</a>
</li>
<li class="chapter " data-level="1.2.4" data-path="frontend-deploy.html">
<a href="frontend-deploy.html#前端项目发布">
前端项目发布
</a>
</li>
<li class="chapter " data-level="1.2.5" data-path="frontend-deploy.html">
<a href="frontend-deploy.html#问题">
问题
</a>
</li>
<li class="chapter " data-level="1.2.6" data-path="frontend-development.html">
<a href="frontend-development.html#项目目录结构">
项目目录结构
</a>
</li>
<li class="chapter " data-level="1.2.7" data-path="frontend-development.html">
<a href="frontend-development.html#系统功能模块">
系统功能模块
</a>
</li>
<li class="chapter " data-level="1.2.8" data-path="frontend-development.html">
<a href="frontend-development.html#路由和状态管理">
路由和状态管理
</a>
</li>
<li class="chapter " data-level="1.2.9" data-path="frontend-development.html">
<a href="frontend-development.html#规范">
规范
</a>
</li>
<li class="chapter " data-level="1.2.10" data-path="frontend-development.html">
<a href="frontend-development.html#接口">
接口
</a>
</li>
<li class="chapter " data-level="1.2.11" data-path="frontend-development.html">
<a href="frontend-development.html#扩展开发">
扩展开发
</a>
</li>
</ul>
</li>
<li class="chapter " data-level="1.3" >
<span>
后端文档
</span>
<ul class="articles">
<li class="chapter " data-level="1.3.1" data-path="architecture-design.html">
<a href="architecture-design.html#调度系统架构设计">
系统架构设计
</a>
</li>
<li class="chapter " data-level="1.3.2" data-path="backend-deploy.html">
<a href="backend-deploy.html#部署文档">
部署文档
</a>
</li>
<li class="chapter " data-level="1.3.3" data-path="task-plugin-development.html">
<a href="task-plugin-development.html#任务插件开发">
自定义任务插件文档
</a>
</li>
</ul>
</li>
<li class="chapter " data-level="1.4" data-path="user-manual.md">
<span>
使用说明文档
</span>
</li>
<li class="divider"></li>
<li>
<a href="https://www.gitbook.com" target="_blank" rel="noopener noreferrer" class="gitbook-link">
本书使用 GitBook 发布
</a>
</li>
</ul>
</nav>
</div>
<div class="book-body">
<div class="body-inner">
<div class="book-header" role="navigation">
<!-- Title -->
<h1>
<i class="fa fa-circle-o-notch fa-spin"></i>
<a href="." >部署文档</a>
</h1>
</div>
<div class="page-wrapper" tabindex="-1" role="main">
<div class="page-inner">
<div id="book-search-results">
<div class="search-noresults">
<section class="normal markdown-section">
<h1 id="&#x90E8;&#x7F72;&#x6587;&#x6863;">&#x90E8;&#x7F72;&#x6587;&#x6863;</h1>
<h2 id="&#x57FA;&#x7840;&#x8F6F;&#x4EF6;&#x5B89;&#x88C5;">&#x57FA;&#x7840;&#x8F6F;&#x4EF6;&#x5B89;&#x88C5;</h2>
<ul>
<li><a href="https://blog.csdn.net/u011886447/article/details/79796802" target="_blank">mysql</a> (5.5+) : &#x5FC5;&#x88C5;</li>
<li><a href="https://www.jianshu.com/p/de90172ea680" target="_blank">zookeeper</a>(3.4.6) &#xFF1A;&#x5FC5;&#x88C5; </li>
<li><a href="https://blog.csdn.net/Evankaka/article/details/51612437" target="_blank">hadoop</a>(2.7.3) &#xFF1A;&#x9009;&#x88C5;&#xFF0C;&#x8D44;&#x6E90;&#x4E0A;&#x4F20;&#xFF0C;MR&#x4EFB;&#x52A1;&#x63D0;&#x4EA4;&#x9700;&#x8981;&#x5B89;&#x88C5;</li>
<li><a href="https://staroon.pro/2017/12/09/HiveInstall/" target="_blank">hive</a>(1.2.1) : &#x9009;&#x88C5;&#xFF0C;hive&#x4EFB;&#x52A1;&#x63D0;&#x4EA4;&#x9700;&#x8981;&#x5B89;&#x88C5;</li>
<li>spark(1.x,2.x) : &#x9009;&#x88C5;&#xFF0C;spark&#x4EFB;&#x52A1;&#x63D0;&#x4EA4;&#x9700;&#x8981;&#x5B89;&#x88C5;</li>
<li>postgresql(8.2.15+) : &#x9009;&#x88C5;&#xFF0C;postgresql sql&#x4EFB;&#x52A1;&#x548C;postgresql &#x5B58;&#x50A8;&#x8FC7;&#x7A0B;&#x9700;&#x8981;&#x5B89;&#x88C5;</li>
</ul>
<h2 id="&#x9879;&#x76EE;&#x7F16;&#x8BD1;">&#x9879;&#x76EE;&#x7F16;&#x8BD1;</h2>
<ul>
<li>&#x6267;&#x884C;&#x7F16;&#x8BD1;&#x547D;&#x4EE4;&#xFF1A;</li>
</ul>
<pre><code> mvn -U clean package assembly:assembly -Dmaven.test.skip=true
</code></pre><ul>
<li>&#x67E5;&#x770B;&#x76EE;&#x5F55;</li>
</ul>
<p>&#x6B63;&#x5E38;&#x7F16;&#x8BD1;&#x5B8C;&#x540E;&#xFF0C;&#x4F1A;&#x5728;&#x5F53;&#x524D;&#x76EE;&#x5F55;&#x751F;&#x6210; target/escheduler-{version}/</p>
<pre><code> bin
conf
lib
script
sql
install.sh
</code></pre><ul>
<li>&#x8BF4;&#x660E;</li>
</ul>
<pre><code>bin : &#x57FA;&#x7840;&#x670D;&#x52A1;&#x542F;&#x52A8;&#x811A;&#x672C;
conf : &#x9879;&#x76EE;&#x914D;&#x7F6E;&#x6587;&#x4EF6;
lib : &#x9879;&#x76EE;&#x4F9D;&#x8D56;jar&#x5305;&#xFF0C;&#x5305;&#x62EC;&#x5404;&#x4E2A;&#x6A21;&#x5757;jar&#x548C;&#x7B2C;&#x4E09;&#x65B9;jar
script : &#x96C6;&#x7FA4;&#x542F;&#x52A8;&#x3001;&#x505C;&#x6B62;&#x548C;&#x670D;&#x52A1;&#x76D1;&#x63A7;&#x542F;&#x505C;&#x811A;&#x672C;
sql : &#x9879;&#x76EE;&#x4F9D;&#x8D56;sql&#x6587;&#x4EF6;
install.sh : &#x4E00;&#x952E;&#x90E8;&#x7F72;&#x811A;&#x672C;
</code></pre><h2 id="&#x6570;&#x636E;&#x5E93;&#x521D;&#x59CB;&#x5316;">&#x6570;&#x636E;&#x5E93;&#x521D;&#x59CB;&#x5316;</h2>
<ul>
<li>&#x521B;&#x5EFA;db&#x548C;&#x8D26;&#x53F7;</li>
</ul>
<pre><code>mysql -h {host} -u {user} -p{password}
mysql&gt; CREATE DATABASE escheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
mysql&gt; GRANT ALL PRIVILEGES ON escheduler.* TO &apos;{user}&apos;@&apos;%&apos; IDENTIFIED BY &apos;{password}&apos;;
mysql&gt; GRANT ALL PRIVILEGES ON escheduler.* TO &apos;{user}&apos;@&apos;localhost&apos; IDENTIFIED BY &apos;{password}&apos;;
mysql&gt; flush privileges;
</code></pre><ul>
<li>&#x521B;&#x5EFA;&#x8868;</li>
</ul>
<pre><code>&#x8BF4;&#x660E;&#xFF1A;&#x5728; target/escheduler-{version}/sql/escheduler.sql&#x548C;quartz.sql
mysql -h {host} -u {user} -p{password} -D {db} &lt; escheduler.sql
mysql -h {host} -u {user} -p{password} -D {db} &lt; quartz.sql
</code></pre><h2 id="&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;">&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;</h2>
<p>&#x56E0;&#x4E3A;escheduler worker &#x90FD;&#x662F;&#x4EE5; sudo -u {linux-user} &#x65B9;&#x5F0F;&#x6765;&#x6267;&#x884C;&#x4F5C;&#x4E1A;&#xFF0C;&#x6240;&#x4EE5;&#x90E8;&#x7F72;&#x7528;&#x6237;&#x9700;&#x8981;&#x6709; sudo &#x6743;&#x9650;&#xFF0C;&#x800C;&#x4E14;&#x662F;&#x514D;&#x5BC6;&#x7684;&#x3002;</p>
<pre><code class="lang-&#x90E8;&#x7F72;&#x8D26;&#x53F7;">vi /etc/sudoers
# &#x90E8;&#x7F72;&#x7528;&#x6237;&#x662F; escheduler &#x8D26;&#x53F7;
escheduler ALL=(ALL) NOPASSWD: ALL
# &#x5E76;&#x4E14;&#x9700;&#x8981;&#x6CE8;&#x91CA;&#x6389; Defaults requiretty &#x4E00;&#x884C;
#Defaults requiretty
</code></pre>
<h2 id="&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x8BF4;&#x660E;">&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x8BF4;&#x660E;</h2>
<pre><code>&#x8BF4;&#x660E;&#xFF1A;&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x4F4D;&#x4E8E; target/escheduler-{version}/conf &#x4E0B;&#x9762;
</code></pre><h3 id="escheduler-alert">escheduler-alert</h3>
<p>&#x914D;&#x7F6E;&#x90AE;&#x4EF6;&#x544A;&#x8B66;&#x4FE1;&#x606F;</p>
<ul>
<li>alert.properties </li>
</ul>
<pre><code>#alert type is EMAIL/SMS
alert.type=EMAIL
# mail server configuration
mail.protocol=SMTP
mail.server.host=smtp.exmail.qq.com
mail.server.port=25
mail.sender=xxxxxxx
mail.passwd=xxxxxxx
# xls file path,need create if not exist
xls.file.path=/opt/xls
</code></pre><h3 id="escheduler-common">escheduler-common</h3>
<p>&#x901A;&#x7528;&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x914D;&#x7F6E;&#xFF0C;&#x961F;&#x5217;&#x9009;&#x62E9;&#x53CA;&#x5730;&#x5740;&#x914D;&#x7F6E;&#xFF0C;&#x901A;&#x7528;&#x6587;&#x4EF6;&#x76EE;&#x5F55;&#x914D;&#x7F6E;</p>
<ul>
<li>common/common.properties</li>
</ul>
<pre><code>#task queue implementation, default &quot;zookeeper&quot;
escheduler.queue.impl=zookeeper
# user data directory path, self configuration, please make sure the directory exists and have read write permissions
data.basedir.path=/tmp/escheduler
# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
data.download.basedir.path=/tmp/escheduler/download
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
process.exec.basepath=/tmp/escheduler/exec
# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions&#x3002;&quot;/escheduler&quot; is recommended
data.store2hdfs.basepath=/escheduler
# whether hdfs starts
hdfs.startup.state=true
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
escheduler.env.path=/opt/.escheduler_env.sh
escheduler.env.py=/opt/escheduler_env.py
#resource.view.suffixs
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
# is development state? default &quot;false&quot;
development.state=false
</code></pre><p>SHELL&#x4EFB;&#x52A1; &#x73AF;&#x5883;&#x53D8;&#x91CF;&#x914D;&#x7F6E;</p>
<pre><code>&#x8BF4;&#x660E;&#xFF1A;&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x4F4D;&#x4E8E; target/escheduler-{version}/conf/env &#x4E0B;&#x9762;
</code></pre><p>.escheduler_env.sh </p>
<pre><code>export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
</code></pre><p>&#x200B; </p>
<p>Python&#x4EFB;&#x52A1; &#x73AF;&#x5883;&#x53D8;&#x91CF;&#x914D;&#x7F6E;</p>
<pre><code>&#x8BF4;&#x660E;&#xFF1A;&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x4F4D;&#x4E8E; target/escheduler-{version}/conf/env &#x4E0B;&#x9762;
</code></pre><p>escheduler_env.py</p>
<pre><code>import os
HADOOP_HOME=&quot;/opt/soft/hadoop&quot;
SPARK_HOME1=&quot;/opt/soft/spark1&quot;
SPARK_HOME2=&quot;/opt/soft/spark2&quot;
PYTHON_HOME=&quot;/opt/soft/python&quot;
JAVA_HOME=&quot;/opt/soft/java&quot;
HIVE_HOME=&quot;/opt/soft/hive&quot;
PATH=os.environ[&apos;PATH&apos;]
PATH=&quot;%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s&quot;%(HIVE_HOME,HADOOP_HOME,SPARK_HOME1,SPARK_HOME2,JAVA_HOME,PYTHON_HOME,PATH)
os.putenv(&apos;PATH&apos;,&apos;%s&apos;%PATH)
</code></pre><p>hadoop &#x914D;&#x7F6E;&#x6587;&#x4EF6;</p>
<ul>
<li>common/hadoop/hadoop.properties</li>
</ul>
<pre><code># ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory
fs.defaultFS=hdfs://mycluster:8020
#resourcemanager ha note this need ips , this empty if single
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
</code></pre><p>&#x5B9A;&#x65F6;&#x5668;&#x914D;&#x7F6E;&#x6587;&#x4EF6;</p>
<ul>
<li>quartz.properties</li>
</ul>
<pre><code>#============================================================================
# Configure Main Scheduler Properties
#============================================================================
org.quartz.scheduler.instanceName = EasyScheduler
org.quartz.scheduler.instanceId = AUTO
org.quartz.scheduler.makeSchedulerThreadDaemon = true
org.quartz.jobStore.useProperties = false
#============================================================================
# Configure ThreadPool
#============================================================================
org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.makeThreadsDaemons = true
org.quartz.threadPool.threadCount = 25
org.quartz.threadPool.threadPriority = 5
#============================================================================
# Configure JobStore
#============================================================================
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.misfireThreshold = 60000
org.quartz.jobStore.clusterCheckinInterval = 5000
org.quartz.jobStore.dataSource = myDs
#============================================================================
# Configure Datasources
#============================================================================
org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
org.quartz.dataSource.myDs.URL = jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=utf8&amp;useSSL=false
org.quartz.dataSource.myDs.user = xx
org.quartz.dataSource.myDs.password = xx
org.quartz.dataSource.myDs.maxConnections = 10
org.quartz.dataSource.myDs.validationQuery = select 1
</code></pre><p>zookeeper &#x914D;&#x7F6E;&#x6587;&#x4EF6;</p>
<ul>
<li>zookeeper.properties</li>
</ul>
<pre><code>#zookeeper cluster
zookeeper.quorum=192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
#escheduler root directory
zookeeper.escheduler.root=/escheduler
#zookeeper server directory
zookeeper.escheduler.dead.servers=/escheduler/dead-servers
zookeeper.escheduler.masters=/escheduler/masters
zookeeper.escheduler.workers=/escheduler/workers
#zookeeper lock directory
zookeeper.escheduler.lock.masters=/escheduler/lock/masters
zookeeper.escheduler.lock.workers=/escheduler/lock/workers
#escheduler failover directory
zookeeper.escheduler.lock.masters.failover=/escheduler/lock/failover/masters
zookeeper.escheduler.lock.workers.failover=/escheduler/lock/failover/workers
#zookeeper timeout and retry settings
zookeeper.session.timeout=300
zookeeper.connection.timeout=300
zookeeper.retry.sleep=1000
zookeeper.retry.maxtime=5
</code></pre><h3 id="escheduler-dao">escheduler-dao</h3>
<p>dao&#x6570;&#x636E;&#x6E90;&#x914D;&#x7F6E;</p>
<ul>
<li>dao/data_source.properties</li>
</ul>
<pre><code># base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=UTF-8
spring.datasource.username=xx
spring.datasource.password=xx
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
# milliseconds for check to close free connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
#check whether the connection is valid for timeout, in seconds
spring.datasource.validationQueryTimeout=3
# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
# validation Query is performed to check whether the connection is valid
spring.datasource.testWhileIdle=true
#execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
</code></pre><h3 id="escheduler-server">escheduler-server</h3>
<p>master&#x914D;&#x7F6E;&#x6587;&#x4EF6;</p>
<ul>
<li>master.properties</li>
</ul>
<pre><code># master execute thread num
master.exec.threads=100
# master execute task number in parallel
master.exec.task.number=20
# master heartbeat interval
master.heartbeat.interval=10
# master commit task retry times
master.task.commit.retryTimes=5
# master commit task interval
master.task.commit.interval=100
# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
master.max.cpuload.avg=10
# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
master.reserved.memory=1
</code></pre><p>worker&#x914D;&#x7F6E;&#x6587;&#x4EF6;</p>
<ul>
<li>worker.properties</li>
</ul>
<pre><code># worker execute thread num
worker.exec.threads=100
# worker heartbeat interval
worker.heartbeat.interval=10
# submit the number of tasks at a time
worker.fetch.task.num = 10
# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
worker.max.cpuload.avg=10
# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
worker.reserved.memory=1
</code></pre><h3 id="escheduler-api">escheduler-api</h3>
<p>web&#x914D;&#x7F6E;&#x6587;&#x4EF6;</p>
<ul>
<li>application.properties</li>
</ul>
<pre><code># server port
server.port=12345
# session config
server.session.timeout=7200
server.context-path=/escheduler/
# file size limit for upload
spring.http.multipart.max-file-size=1024MB
spring.http.multipart.max-request-size=1024MB
# post content
server.max-http-post-size=5000000
</code></pre><h2 id="&#x4F2A;&#x5206;&#x5E03;&#x5F0F;&#x90E8;&#x7F72;">&#x4F2A;&#x5206;&#x5E03;&#x5F0F;&#x90E8;&#x7F72;</h2>
<h3 id="1&#xFF0C;&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;">1&#xFF0C;&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;</h3>
<p>&#x200B; &#x5982;&#x4E0A; <strong>&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;</strong></p>
<h3 id="2&#xFF0C;&#x6839;&#x636E;&#x5B9E;&#x9645;&#x9700;&#x6C42;&#x6765;&#x521B;&#x5EFA;hdfs&#x6839;&#x8DEF;&#x5F84;">2&#xFF0C;&#x6839;&#x636E;&#x5B9E;&#x9645;&#x9700;&#x6C42;&#x6765;&#x521B;&#x5EFA;HDFS&#x6839;&#x8DEF;&#x5F84;</h3>
<p>&#x200B; &#x6839;&#x636E; <strong>common/common.properties</strong> &#x4E2D; <strong>hdfs.startup.state</strong> &#x7684;&#x914D;&#x7F6E;&#x6765;&#x5224;&#x65AD;&#x662F;&#x5426;&#x542F;&#x52A8;HDFS&#xFF0C;&#x5982;&#x679C;&#x542F;&#x52A8;&#xFF0C;&#x5219;&#x9700;&#x8981;&#x521B;&#x5EFA;HDFS&#x6839;&#x8DEF;&#x5F84;&#xFF0C;&#x5E76;&#x5C06; <strong>owner</strong> &#x4FEE;&#x6539;&#x4E3A;<strong>&#x90E8;&#x7F72;&#x7528;&#x6237;</strong>&#xFF0C;&#x5426;&#x5219;&#x5FFD;&#x7565;&#x6B64;&#x6B65;&#x9AA4;</p>
<h3 id="3&#xFF0C;&#x9879;&#x76EE;&#x7F16;&#x8BD1;">3&#xFF0C;&#x9879;&#x76EE;&#x7F16;&#x8BD1;</h3>
<p>&#x200B; &#x5982;&#x4E0A;&#x8FDB;&#x884C; <strong>&#x9879;&#x76EE;&#x7F16;&#x8BD1;</strong></p>
<h3 id="4&#xFF0C;&#x4FEE;&#x6539;&#x914D;&#x7F6E;&#x6587;&#x4EF6;">4&#xFF0C;&#x4FEE;&#x6539;&#x914D;&#x7F6E;&#x6587;&#x4EF6;</h3>
<p>&#x200B; &#x6839;&#x636E; <strong>&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x8BF4;&#x660E;</strong> &#x4FEE;&#x6539;&#x914D;&#x7F6E;&#x6587;&#x4EF6;&#x548C; <strong>&#x73AF;&#x5883;&#x53D8;&#x91CF;</strong> &#x6587;&#x4EF6;</p>
<h3 id="5&#xFF0C;&#x521B;&#x5EFA;&#x76EE;&#x5F55;&#x5E76;&#x5C06;&#x73AF;&#x5883;&#x53D8;&#x91CF;&#x6587;&#x4EF6;&#x590D;&#x5236;&#x5230;&#x6307;&#x5B9A;&#x76EE;&#x5F55;">5&#xFF0C;&#x521B;&#x5EFA;&#x76EE;&#x5F55;&#x5E76;&#x5C06;&#x73AF;&#x5883;&#x53D8;&#x91CF;&#x6587;&#x4EF6;&#x590D;&#x5236;&#x5230;&#x6307;&#x5B9A;&#x76EE;&#x5F55;</h3>
<ul>
<li><p>&#x521B;&#x5EFA; <strong>common/common.properties</strong> &#x4E0B;&#x7684;data.basedir.path&#x3001;data.download.basedir.path&#x548C;process.exec.basepath&#x8DEF;&#x5F84;</p>
</li>
<li><p>&#x5C06;<strong>.escheduler_env.sh</strong> &#x548C; <strong>escheduler_env.py</strong> &#x4E24;&#x4E2A;&#x73AF;&#x5883;&#x53D8;&#x91CF;&#x6587;&#x4EF6;&#x590D;&#x5236;&#x5230; <strong>common/common.properties</strong>&#x914D;&#x7F6E;&#x7684;<strong>escheduler.env.path</strong> &#x548C; <strong>escheduler.env.py</strong> &#x7684;&#x76EE;&#x5F55;&#x4E0B;&#xFF0C;&#x5E76;&#x5C06; <strong>owner</strong> &#x4FEE;&#x6539;&#x4E3A;<strong>&#x90E8;&#x7F72;&#x7528;&#x6237;</strong></p>
</li>
</ul>
<h3 id="6&#xFF0C;&#x542F;&#x505C;&#x670D;&#x52A1;">6&#xFF0C;&#x542F;&#x505C;&#x670D;&#x52A1;</h3>
<ul>
<li>&#x542F;&#x505C;Master</li>
</ul>
<pre><code class="lang-&#x542F;&#x52A8;master">sh ./bin/arklifter-daemon.sh start master-server
sh ./bin/arklifter-daemon.sh stop master-server
</code></pre>
<ul>
<li>&#x542F;&#x505C;Worker</li>
</ul>
<pre><code>sh ./bin/arklifter-daemon.sh start worker-server
sh ./bin/arklifter-daemon.sh stop worker-server
</code></pre><ul>
<li>&#x542F;&#x505C;Api</li>
</ul>
<pre><code>sh ./bin/arklifter-daemon.sh start api-server
sh ./bin/arklifter-daemon.sh stop api-server
</code></pre><ul>
<li>&#x542F;&#x505C;Logger</li>
</ul>
<pre><code>sh ./bin/arklifter-daemon.sh start logger-server
sh ./bin/arklifter-daemon.sh stop logger-server
</code></pre><ul>
<li>&#x542F;&#x505C;Alert</li>
</ul>
<pre><code>sh ./bin/arklifter-daemon.sh start alert-server
sh ./bin/arklifter-daemon.sh stop alert-server
</code></pre><h2 id="&#x5206;&#x5E03;&#x5F0F;&#x90E8;&#x7F72;">&#x5206;&#x5E03;&#x5F0F;&#x90E8;&#x7F72;</h2>
<h3 id="1&#xFF0C;&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;">1&#xFF0C;&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;</h3>
<ul>
<li>&#x5728;&#x9700;&#x8981;&#x90E8;&#x7F72;&#x8C03;&#x5EA6;&#x7684;&#x673A;&#x5668;&#x4E0A;&#x5982;&#x4E0A; <strong>&#x521B;&#x5EFA;&#x90E8;&#x7F72;&#x7528;&#x6237;</strong></li>
<li><a href="https://blog.csdn.net/thinkmore1314/article/details/22489203" target="_blank">&#x5C06; <strong>&#x4E3B;&#x673A;&#x5668;</strong> &#x548C;&#x5404;&#x4E2A;&#x5176;&#x5B83;&#x673A;&#x5668;SSH&#x6253;&#x901A;</a></li>
</ul>
<h3 id="2&#xFF0C;&#x6839;&#x636E;&#x5B9E;&#x9645;&#x9700;&#x6C42;&#x6765;&#x521B;&#x5EFA;hdfs&#x6839;&#x8DEF;&#x5F84;">2&#xFF0C;&#x6839;&#x636E;&#x5B9E;&#x9645;&#x9700;&#x6C42;&#x6765;&#x521B;&#x5EFA;HDFS&#x6839;&#x8DEF;&#x5F84;</h3>
<p>&#x200B; &#x6839;&#x636E; <strong>common/common.properties</strong> &#x4E2D; <strong>hdfs.startup.state</strong> &#x7684;&#x914D;&#x7F6E;&#x6765;&#x5224;&#x65AD;&#x662F;&#x5426;&#x542F;&#x52A8;HDFS&#xFF0C;&#x5982;&#x679C;&#x542F;&#x52A8;&#xFF0C;&#x5219;&#x9700;&#x8981;&#x521B;&#x5EFA;HDFS&#x6839;&#x8DEF;&#x5F84;&#xFF0C;&#x5E76;&#x5C06; <strong>owner</strong> &#x4FEE;&#x6539;&#x4E3A;<strong>&#x90E8;&#x7F72;&#x7528;&#x6237;</strong>&#xFF0C;&#x5426;&#x5219;&#x5FFD;&#x7565;&#x6B64;&#x6B65;&#x9AA4;</p>
<h3 id="3&#xFF0C;&#x9879;&#x76EE;&#x7F16;&#x8BD1;">3&#xFF0C;&#x9879;&#x76EE;&#x7F16;&#x8BD1;</h3>
<p>&#x200B; &#x5982;&#x4E0A;&#x8FDB;&#x884C; <strong>&#x9879;&#x76EE;&#x7F16;&#x8BD1;</strong></p>
<h3 id="4&#xFF0C;&#x5C06;&#x73AF;&#x5883;&#x53D8;&#x91CF;&#x6587;&#x4EF6;&#x590D;&#x5236;&#x5230;&#x6307;&#x5B9A;&#x76EE;&#x5F55;">4&#xFF0C;&#x5C06;&#x73AF;&#x5883;&#x53D8;&#x91CF;&#x6587;&#x4EF6;&#x590D;&#x5236;&#x5230;&#x6307;&#x5B9A;&#x76EE;&#x5F55;</h3>
<p>&#x200B; &#x5C06;<strong>.escheduler_env.sh</strong> &#x548C; <strong>escheduler_env.py</strong> &#x4E24;&#x4E2A;&#x73AF;&#x5883;&#x53D8;&#x91CF;&#x6587;&#x4EF6;&#x590D;&#x5236;&#x5230; <strong>common/common.properties</strong>&#x914D;&#x7F6E;&#x7684;<strong>escheduler.env.path</strong> &#x548C; <strong>escheduler.env.py</strong> &#x7684;&#x76EE;&#x5F55;&#x4E0B;&#xFF0C;&#x5E76;&#x5C06; <strong>owner</strong> &#x4FEE;&#x6539;&#x4E3A;<strong>&#x90E8;&#x7F72;&#x7528;&#x6237;</strong></p>
<h3 id="5&#xFF0C;&#x4FEE;&#x6539;-installsh">5&#xFF0C;&#x4FEE;&#x6539; install.sh</h3>
<p>&#x200B; &#x4FEE;&#x6539; install.sh &#x4E2D;&#x53D8;&#x91CF;&#x7684;&#x503C;&#xFF0C;&#x66FF;&#x6362;&#x6210;&#x81EA;&#x8EAB;&#x4E1A;&#x52A1;&#x6240;&#x9700;&#x7684;&#x503C;</p>
<h3 id="6&#xFF0C;&#x4E00;&#x952E;&#x90E8;&#x7F72;">6&#xFF0C;&#x4E00;&#x952E;&#x90E8;&#x7F72;</h3>
<ul>
<li><p>&#x5B89;&#x88C5; pip install kazoo</p>
</li>
<li><p>&#x4F7F;&#x7528;&#x90E8;&#x7F72;&#x7528;&#x6237; sh install.sh &#x4E00;&#x952E;&#x90E8;&#x7F72;</p>
</li>
</ul>
<h2 id="&#x670D;&#x52A1;&#x76D1;&#x63A7;">&#x670D;&#x52A1;&#x76D1;&#x63A7;</h2>
<p>monitor_server.py &#x811A;&#x672C;&#x7528;&#x4E8E;&#x76D1;&#x542C; master &#x548C; worker &#x670D;&#x52A1;&#xFF0C;&#x5E76;&#x5728;&#x670D;&#x52A1;&#x6302;&#x6389;&#x65F6;&#x91CD;&#x542F;&#x670D;&#x52A1;</p>
<p>&#x6CE8;&#x610F;&#xFF1A;&#x5728;&#x5168;&#x90E8;&#x670D;&#x52A1;&#x90FD;&#x542F;&#x52A8;&#x4E4B;&#x540E;&#x542F;&#x52A8;</p>
<p>nohup python -u monitor_server.py &gt; nohup.out 2&gt;&amp;1 &amp;</p>
<h2 id="&#x65E5;&#x5FD7;&#x67E5;&#x770B;">&#x65E5;&#x5FD7;&#x67E5;&#x770B;</h2>
<p>&#x65E5;&#x5FD7;&#x7EDF;&#x4E00;&#x5B58;&#x653E;&#x4E8E;&#x6307;&#x5B9A;&#x6587;&#x4EF6;&#x5939;&#x5185;</p>
<pre><code class="lang-&#x65E5;&#x5FD7;&#x8DEF;&#x5F84;"> logs/
&#x251C;&#x2500;&#x2500; escheduler-alert-server.log
&#x251C;&#x2500;&#x2500; escheduler-master-server.log
&#x251C;&#x2500;&#x2500; escheduler-worker-server.log
&#x251C;&#x2500;&#x2500; escheduler-api-server.log
&#x2514;&#x2500;&#x2500; escheduler-logger-server.log
</code></pre>
</section>
</div>
<div class="search-results">
<div class="has-results">
<h1 class="search-results-title"><span class='search-results-count'></span> results matching "<span class='search-query'></span>"</h1>
<ul class="search-results-list"></ul>
</div>
<div class="no-results">
<h1 class="search-results-title">No results matching "<span class='search-query'></span>"</h1>
</div>
</div>
</div>
</div>
</div>
</div>
<a href="architecture-design.html#调度系统架构设计" class="navigation navigation-prev " aria-label="Previous page: 系统架构设计">
<i class="fa fa-angle-left"></i>
</a>
<a href="task-plugin-development.html#任务插件开发" class="navigation navigation-next " aria-label="Next page: 自定义任务插件文档">
<i class="fa fa-angle-right"></i>
</a>
</div>
<script>
// GitBook runtime bootstrap: before gitbook.js loads, `gitbook` acts as a
// plain callback queue; the runtime later drains it and replays each callback.
var gitbook = gitbook || [];
gitbook.push(function() {
// Publish this page's generated metadata (title, prev/next links, plugin
// config, source file info) so GitBook plugins can react to the page change.
// NOTE(review): payload is auto-generated by GitBook 3.2.3 — do not hand-edit.
gitbook.page.hasChanged({"page":{"title":"部署文档","level":"1.3.2","depth":2,"next":{"title":"自定义任务插件文档","level":"1.3.3","depth":2,"anchor":"#任务插件开发","path":"task-plugin-development.md","ref":"task-plugin-development.md#任务插件开发","articles":[]},"previous":{"title":"系统架构设计","level":"1.3.1","depth":2,"anchor":"#调度系统架构设计","path":"architecture-design.md","ref":"architecture-design.md#调度系统架构设计","articles":[]},"dir":"ltr"},"config":{"plugins":["expandable-chapters","insert-logo-link"],"styles":{"website":"./styles/website.css"},"pluginsConfig":{"insert-logo-link":{"src":"../images/logo.png","url":"/"},"expandable-chapters":{},"highlight":{},"search":{},"lunr":{"maxIndexSize":1000000,"ignoreSpecialCharacters":false},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"fontsettings":{"theme":"white","family":"sans","size":2},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false}},"theme":"default","author":"YIGUAN","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{},"title":"调度系统-EasyScheduler","language":"zh-hans","gitbook":"3.2.3","description":"调度系统"},"file":{"path":"backend-deploy.md","mtime":"2019-03-28T11:14:39.609Z","type":"markdown"},"gitbook":{"version":"3.2.3","time":"2019-03-28T11:15:13.750Z"},"basePath":".","book":{"language":""}});
});
</script>
</div>
<script src="gitbook/gitbook.js"></script>
<script src="gitbook/theme.js"></script>
<script src="gitbook/gitbook-plugin-expandable-chapters/expandable-chapters.js"></script>
<script src="gitbook/gitbook-plugin-insert-logo-link/plugin.js"></script>
<script src="gitbook/gitbook-plugin-search/search-engine.js"></script>
<script src="gitbook/gitbook-plugin-search/search.js"></script>
<script src="gitbook/gitbook-plugin-lunr/lunr.min.js"></script>
<script src="gitbook/gitbook-plugin-lunr/search-lunr.js"></script>
<script src="gitbook/gitbook-plugin-sharing/buttons.js"></script>
<script src="gitbook/gitbook-plugin-fontsettings/fontsettings.js"></script>
</body>
</html>