
Merge remote-tracking branch 'upstream/dev' into dev

pull/1/MERGE
ligang, 6 years ago · parent commit 81c39f1053
8 changed files:
1. docs/zh_CN/_book/前端部署文档.html (4)
2. docs/zh_CN/_book/后端部署文档.html (391)
3. docs/zh_CN/前端部署文档.md (2)
4. docs/zh_CN/后端部署文档.md (473)
5. escheduler-ui/src/js/conf/home/pages/security/pages/queue/_source/createQueue.vue (26)
6. install.sh (115)
7. script/monitor_server.py (0)
8. script/scp_hosts.sh (2)

docs/zh_CN/_book/前端部署文档.html (4 changes)

@@ -469,7 +469,7 @@ API_BASE = http://192.168.220.204:12345
 <li><p><code>npm run build</code> builds the project (the build creates a folder named dist in the project root, used for publishing to the production Nginx)</p>
 </li>
 </ul>
-<h3 id="2自动化部署">2. Automated deployment`</h3>
+<h3 id="2自动化部署">2. Automated deployment</h3>
 <p>In the <code>escheduler-ui</code> project root, edit the install script <code>vi install(线上环境).sh</code></p>
 <p>Change the front-end access port and the back-end proxy API address</p>
 <pre><code># configure the front-end access port
@@ -604,7 +604,7 @@ client_max_body_size 1024m
 <script>
 var gitbook = gitbook || [];
 gitbook.push(function() {
-gitbook.page.hasChanged({…});  <!-- generated GitBook page metadata; the only change is the file mtime: "2019-04-12T01:30:07.632Z" -->
+gitbook.page.hasChanged({…});  <!-- …updated to "2019-04-12T03:16:34.222Z" -->
 });
 </script>
 </div>

docs/zh_CN/_book/后端部署文档.html (391 changes)

This file is the generated GitBook rendering of docs/zh_CN/后端部署文档.md; its 391 changed lines mirror the markdown diff shown below: the Hadoop prerequisite is relaxed from 2.7.3 to 2.6+, the generated-directory listing after the build step is dropped, the "create the deployment user" paragraph is reworded into a per-machine instruction, the long "配置文件说明" (configuration file reference) is removed, new "ssh免密配置" (passwordless SSH) and consolidated "部署" (deployment) sections are added, and the trailing pseudo-distributed/distributed deployment, service-monitoring, and duplicated log-viewing sections are cut. The embedded GitBook page metadata changes only in its mtime: "2019-04-08T08:09:31.074Z" → "2019-04-12T03:01:32.518Z".

docs/zh_CN/前端部署文档.md (2 changes)

@@ -45,7 +45,7 @@ API_BASE = http://192.168.220.204:12345
-### 2. Automated deployment`
+### 2. Automated deployment
 In the `escheduler-ui` project root, edit the install script `vi install(线上环境).sh`
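A minimal sketch of the flow this section documents, assuming the port and proxy address are variables inside `install(线上环境).sh` (the command sequence is illustrative, not quoted from the script):

```sh
# illustrative front-end deployment flow; the settings edited are assumptions
cd escheduler-ui
vi "install(线上环境).sh"   # set the front-end access port and the back-end proxy API address
sh "install(线上环境).sh"   # deploy the built dist/ behind the production Nginx
```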

docs/zh_CN/后端部署文档.md (473 changes)

@@ -6,7 +6,7 @@
 * [Mysql](https://blog.csdn.net/u011886447/article/details/79796802) (5.5+) : required
 * [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html) (1.8+) : required
 * [ZooKeeper](https://www.jianshu.com/p/de90172ea680) (3.4.6) : required
-* [Hadoop](https://blog.csdn.net/Evankaka/article/details/51612437) (2.7.3) : optional; if the resource-upload feature or MapReduce task submission is needed, Hadoop must be configured (uploaded resource files are currently stored on Hdfs)
+* [Hadoop](https://blog.csdn.net/Evankaka/article/details/51612437) (2.6+) : optional; if the resource-upload feature or MapReduce task submission is needed, Hadoop must be configured (uploaded resource files are currently stored on Hdfs)
 * [Hive](https://staroon.pro/2017/12/09/HiveInstall/) (1.2.1) : optional; required for Hive task submission
 * Spark (1.x,2.x) : optional; required for Spark task submission
 * PostgreSQL (8.2.15+) : optional; required for PostgreSQL stored procedures
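Before building, it can be worth confirming the hard prerequisites above; a quick illustrative check, not part of the documented procedure, assuming the client tools are on PATH:

```sh
# illustrative prerequisite checks
mysql --version      # expect 5.5+
java -version        # expect 1.8+
zkServer.sh status   # a running ZooKeeper (3.4.6) should report its mode
hadoop version       # optional: only needed for resource upload / MapReduce submission (2.6+)
```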
@@ -27,15 +27,6 @@
 After a normal build, target/escheduler-{version}/ is generated in the current directory
-
-```
-    bin
-    conf
-    lib
-    script
-    sql
-    install.sh
-```
 - Description
 ```
@@ -74,7 +65,7 @@ mysql -h {host} -u {user} -p{password} -D {db} < quartz.sql
 ## Create the deployment user
-Because escheduler workers all execute jobs via sudo -u {linux-user}, the deployment user needs sudo privileges, and passwordless at that.
+- Create the deployment user on every machine where the scheduler is to be deployed; because the worker service executes jobs via sudo -u {linux-user}, the deployment user needs sudo privileges, and passwordless at that.
 ```deployment-account
 vi /etc/sudoers
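A minimal sketch of creating such a user and verifying the passwordless-sudo requirement; the useradd and the check are illustrative additions, only the sudoers edit itself comes from the doc:

```sh
# as root: create the deployment user used throughout this doc (illustrative)
useradd escheduler
# after granting the account passwordless sudo in /etc/sudoers as shown above,
# verify that sudo really works without a password:
su - escheduler -c 'sudo -n true && echo "passwordless sudo OK"'
```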
@@ -86,386 +77,73 @@ escheduler ALL=(ALL) NOPASSWD: NOPASSWD: ALL
 #Default requiretty
 ```
-## Configuration file reference
+## Passwordless SSH setup
+Configure passwordless SSH login on the deployment machine and on the other installation machines; if the scheduler is to be installed on the deployment machine itself, it must also be able to log in to itself without a password
-```
-Note: the configuration files live under target/escheduler-{version}/conf
-```
-### escheduler-alert
-Mail alert configuration
-* alert.properties
-```
-# qq mail is used as the example; for other providers, change the corresponding settings
-#alert type is EMAIL/SMS
-alert.type=EMAIL
-# mail server configuration
-mail.protocol=SMTP
-mail.server.host=smtp.exmail.qq.com
-mail.server.port=25
-mail.sender=xxxxxxx@qq.com
-mail.passwd=xxxxxxx
-# xls file path, need manually create it before use if not exist
-xls.file.path=/opt/xls
-```
-### escheduler-common
-General configuration: queue selection and addresses, and common file directories
-- common/common.properties
-```
-#task queue implementation, default "zookeeper"
-escheduler.queue.impl=zookeeper
-# user data directory path, self configuration, please make sure the directory exists and have read write permissions
-data.basedir.path=/tmp/escheduler
-# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
-data.download.basedir.path=/tmp/escheduler/download
-# process execute directory. self configuration, please make sure the directory exists and have read write permissions
-process.exec.basepath=/tmp/escheduler/exec
-# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/escheduler" is recommended
-data.store2hdfs.basepath=/escheduler
-# whether hdfs starts
-hdfs.startup.state=true
-# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
-escheduler.env.path=/opt/.escheduler_env.sh
-escheduler.env.py=/opt/escheduler_env.py
-#resource.view.suffixs
-resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
-# is development state? default "false"
-development.state=false
-```
-Environment variable configuration for SHELL tasks
-```
-Note: the file lives under target/escheduler-{version}/conf/env; this is the environment the Worker loads when executing tasks
-```
-.escheduler_env.sh
-```
-export HADOOP_HOME=/opt/soft/hadoop
-export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
-export SPARK_HOME1=/opt/soft/spark1
-export SPARK_HOME2=/opt/soft/spark2
-export PYTHON_HOME=/opt/soft/python
-export JAVA_HOME=/opt/soft/java
-export HIVE_HOME=/opt/soft/hive
-export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
-```
-Environment variable configuration for Python tasks
-```
-Note: the file lives under target/escheduler-{version}/conf/env
-```
-escheduler_env.py
-```
-import os
-HADOOP_HOME="/opt/soft/hadoop"
-SPARK_HOME1="/opt/soft/spark1"
-SPARK_HOME2="/opt/soft/spark2"
-PYTHON_HOME="/opt/soft/python"
-JAVA_HOME="/opt/soft/java"
-HIVE_HOME="/opt/soft/hive"
-PATH=os.environ['PATH']
-PATH="%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s"%(HIVE_HOME,HADOOP_HOME,SPARK_HOME1,SPARK_HOME2,JAVA_HOME,PYTHON_HOME,PATH)
-os.putenv('PATH','%s'%PATH)
-```
-hadoop configuration file
-- common/hadoop/hadoop.properties
-```
-# ha or single namenode; if namenode ha, core-site.xml and hdfs-site.xml need to be copied to the conf directory
-fs.defaultFS=hdfs://mycluster:8020
-#resourcemanager ha note this need ips , this empty if single
-yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
-# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
-yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
-```
-Timer (Quartz) configuration file
-- quartz.properties
-```
-#============================================================================
-# Configure Main Scheduler Properties
-#============================================================================
-org.quartz.scheduler.instanceName = EasyScheduler
-org.quartz.scheduler.instanceId = AUTO
-org.quartz.scheduler.makeSchedulerThreadDaemon = true
-org.quartz.jobStore.useProperties = false
-#============================================================================
-# Configure ThreadPool
-#============================================================================
-org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
-org.quartz.threadPool.makeThreadsDaemons = true
-org.quartz.threadPool.threadCount = 25
-org.quartz.threadPool.threadPriority = 5
-#============================================================================
-# Configure JobStore
-#============================================================================
-org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
-org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
-org.quartz.jobStore.tablePrefix = QRTZ_
-org.quartz.jobStore.isClustered = true
-org.quartz.jobStore.misfireThreshold = 60000
-org.quartz.jobStore.clusterCheckinInterval = 5000
-org.quartz.jobStore.dataSource = myDs
-#============================================================================
-# Configure Datasources
-#============================================================================
-org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
-org.quartz.dataSource.myDs.URL = jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=utf8&useSSL=false
-org.quartz.dataSource.myDs.user = xx
-org.quartz.dataSource.myDs.password = xx
-org.quartz.dataSource.myDs.maxConnections = 10
-org.quartz.dataSource.myDs.validationQuery = select 1
-```
-zookeeper configuration file
-- zookeeper.properties
-```
-#zookeeper cluster
-zookeeper.quorum=192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
-#escheduler root directory
-zookeeper.escheduler.root=/escheduler
-#zookeeper server directory
-zookeeper.escheduler.dead.servers=/escheduler/dead-servers
-zookeeper.escheduler.masters=/escheduler/masters
-zookeeper.escheduler.workers=/escheduler/workers
-#zookeeper lock directory
-zookeeper.escheduler.lock.masters=/escheduler/lock/masters
-zookeeper.escheduler.lock.workers=/escheduler/lock/workers
-#escheduler failover directory
-zookeeper.escheduler.lock.masters.failover=/escheduler/lock/failover/masters
-zookeeper.escheduler.lock.workers.failover=/escheduler/lock/failover/workers
-#escheduler failover directory
-zookeeper.session.timeout=300
-zookeeper.connection.timeout=300
-zookeeper.retry.sleep=1000
-zookeeper.retry.maxtime=5
-```
-### escheduler-dao
-dao data source configuration
-- dao/data_source.properties
-```
-# base spring data source configuration
-spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
-spring.datasource.driver-class-name=com.mysql.jdbc.Driver
-spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=UTF-8
-spring.datasource.username=xx
-spring.datasource.password=xx
-# connection configuration
-spring.datasource.initialSize=5
-# min connection number
-spring.datasource.minIdle=5
-# max connection number
-spring.datasource.maxActive=50
-# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
-# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
-spring.datasource.maxWait=60000
-# milliseconds for check to close free connections
-spring.datasource.timeBetweenEvictionRunsMillis=60000
-# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
-spring.datasource.timeBetweenConnectErrorMillis=60000
-# the longest time a connection remains idle without being evicted, in milliseconds
-spring.datasource.minEvictableIdleTimeMillis=300000
-#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
-spring.datasource.validationQuery=SELECT 1
-#check whether the connection is valid for timeout, in seconds
-spring.datasource.validationQueryTimeout=3
-# when applying for a connection, if it is detected that the connection is idle longer than timeBetweenEvictionRunsMillis,
-# validationQuery is performed to check whether the connection is valid
-spring.datasource.testWhileIdle=true
-#execute validation to check if the connection is valid when applying for a connection
-spring.datasource.testOnBorrow=true
-#execute validation to check if the connection is valid when the connection is returned
-spring.datasource.testOnReturn=false
-spring.datasource.defaultAutoCommit=true
-spring.datasource.keepAlive=true
-# open PSCache, specify count PSCache for every connection
-spring.datasource.poolPreparedStatements=true
-spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
-```
-### escheduler-server
-master configuration file
-- master.properties
-```
-# master execute thread num
-master.exec.threads=100
-# master execute task number in parallel
-master.exec.task.number=20
-# master heartbeat interval
-master.heartbeat.interval=10
-# master commit task retry times
-master.task.commit.retryTimes=5
-# master commit task interval
-master.task.commit.interval=100
-# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
-master.max.cpuload.avg=10
-# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
-master.reserved.memory=1
-```
-worker configuration file
-- worker.properties
-```
-# worker execute thread num
-worker.exec.threads=100
-# worker heartbeat interval
-worker.heartbeat.interval=10
-# submit the number of tasks at a time
-worker.fetch.task.num = 10
-# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
-worker.max.cpuload.avg=10
-# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
-worker.reserved.memory=1
-```
-### escheduler-api
-web configuration file
-- application.properties
-```
-# server port
-server.port=12345
-# session config
-server.session.timeout=7200
-server.context-path=/escheduler/
-# file size limit for upload
-spring.http.multipart.max-file-size=1024MB
-spring.http.multipart.max-request-size=1024MB
-# post content
-server.max-http-post-size=5000000
-```
-## Pseudo-distributed deployment
-### 1. Create the deployment user
-As above, **create the deployment user**
-### 2. Create the HDFS root path according to actual needs
-Whether HDFS is used is decided by the **hdfs.startup.state** setting in **common/common.properties**; if it is enabled, create the HDFS root path and change its **owner** to the **deployment user**, otherwise skip this step
-### 3. Build the project
-Perform **build the project** as above
-### 4. Edit the configuration files
-Edit the configuration files and the **environment variable** files according to the **configuration file reference**
-### 5. Create the directories and copy the environment variable files to the configured locations
-- Create the data.basedir.path, data.download.basedir.path and process.exec.basepath directories from **common/common.properties**
-- Copy the two environment variable files **.escheduler_env.sh** and **escheduler_env.py** into the directories configured as **escheduler.env.path** and **escheduler.env.py** in **common/common.properties**, and change their **owner** to the **deployment user**
-### 6. Starting and stopping services
+- [Open SSH access between the **master machine** and each of the other machines](http://geek.analysys.cn/topic/113)
+
+## Deployment
+### 1. Fix the installation directory permissions
+- The installation directory looks like this:
+```
+    bin
+    conf
+    install.sh
+    lib
+    script
+    sql
+```
+- Change ownership (replace deployUser with the actual deployment user)
+  `sudo chown -R deployUser:deployUser *`
+### 2. Edit the environment variable files
+- According to business needs, adjust the environment variables in the two files **escheduler_env.py** and **.escheduler_env.sh** under conf/env/
+### 3. Edit the deployment parameters
+- Change the parameters in **install.sh**, replacing them with the values your environment needs
+- If the hdfs features are used, copy **hdfs-site.xml** and **core-site.xml** into the conf directory
+### 4. One-click deployment
+- Install the zookeeper client library
+  `pip install kazoo`
+- Switch to the deployment user and deploy in one step
+  `sh install.sh`
+- Use jps to check whether the services started
+```aidl
+    MasterServer         ----- master service
+    WorkerServer         ----- worker service
+    LoggerServer         ----- logger service
+    ApiApplicationServer ----- api service
+    AlertServer          ----- alert service
+```
+## Viewing logs
+Logs are collected in a designated folder
+```log-path
+ logs/
+    ├── escheduler-alert-server.log
+    ├── escheduler-master-server.log
+    ├── escheduler-worker-server.log
+    ├── escheduler-api-server.log
+    ├── escheduler-logger-server.log
+```
+## Starting and stopping services
 * Start/stop Master
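Taken together, the new deployment section reduces to the sequence below; this is a hedged consolidation of the documented steps, run from the unpacked target/escheduler-{version}/ directory (paths written as /path/to/... are placeholders):

```sh
# run inside the unpacked target/escheduler-{version}/ directory
sudo chown -R escheduler:escheduler *      # 1. hand the install dir to the deployment user
vi conf/env/.escheduler_env.sh             # 2. adjust the environment variable files
vi conf/env/escheduler_env.py
vi install.sh                              # 3. set the deployment parameters
cp /path/to/hdfs-site.xml /path/to/core-site.xml conf/   # only if hdfs features are used
pip install kazoo                          # 4. zookeeper client library used by the installer
su escheduler -c "sh install.sh"           # one-click deploy as the deployment user
jps   # expect MasterServer, WorkerServer, LoggerServer, ApiApplicationServer, AlertServer
```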
@@ -500,68 +178,3 @@ sh ./bin/escheduler-daemon.sh start alert-server
 sh ./bin/escheduler-daemon.sh stop alert-server
 ```
-## Distributed deployment
-### 1. Create the deployment user
-  - On the machines where the scheduler is to be deployed, **create the deployment user** as above
-  - [Open SSH access between the **master machine** and each of the other machines](https://blog.csdn.net/thinkmore1314/article/details/22489203)
-### 2. Create the HDFS root path according to actual needs
-Whether HDFS is used is decided by the **hdfs.startup.state** setting in **common/common.properties**; if it is enabled, create the HDFS root path and change its **owner** to the **deployment user**, otherwise skip this step
-### 3. Build the project
-Perform **build the project** as above
-### 4. Copy the environment variable files to the configured locations
-Copy the two environment variable files **.escheduler_env.sh** and **escheduler_env.py** into the directories configured as **escheduler.env.path** and **escheduler.env.py** in **common/common.properties**, and change their **owner** to the **deployment user**
-### 5. Edit install.sh
-Change the values of the variables in install.sh, replacing them with the values your environment needs
-### 6. One-click deployment
-- Install: pip install kazoo
-- The installation directory looks like this:
-```
-    bin
-    conf
-    escheduler-1.0.0-SNAPSHOT.tar.gz
-    install.sh
-    lib
-    monitor_server.py
-    script
-    sql
-```
-- As the deployment user, run sh install.sh for one-click deployment
-  - Note: the version number (1.0.0) in `tar -zxvf $workDir/../escheduler-1.0.0.tar.gz -C $installPath` inside scp_hosts.sh must be replaced by hand with the actual version before running
-## Service monitoring
-The monitor_server.py script watches the master and worker services and restarts them when they go down
-Note: start it only after all services have been started
-nohup python -u monitor_server.py > nohup.out 2>&1 &
-## Viewing logs
-Logs are collected in a designated folder
-```log-path
- logs/
-    ├── escheduler-alert-server.log
-    ├── escheduler-master-server.log
-    ├── escheduler-worker-server.log
-    ├── escheduler-api-server.log
-    ├── escheduler-logger-server.log
-```
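For the new "Passwordless SSH setup" section above, a sketch of the usual key-based setup; ssh-keygen and ssh-copy-id are the standard tools, while the host name ark1 is taken from the doc's examples and otherwise a placeholder:

```sh
# on the deployment machine, as the deployment user
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa   # key pair without a passphrase
ssh-copy-id escheduler@ark1                # repeat for every other installation machine
ssh-copy-id escheduler@localhost           # if the scheduler is also installed on this machine
ssh escheduler@ark1 true                   # verify: must succeed without a password prompt
```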

escheduler-ui/src/js/conf/home/pages/security/pages/queue/_source/createQueue.vue (26 changes)

@@ -66,21 +66,39 @@
 if (this.item) {
   param.id = this.item.id
 }
-this._verifyName(param).then(() => {
-  this.$refs['popup'].spinnerLoading = true
-  this.store.dispatch(`security/${this.item ? 'updateQueueQ' : 'createQueueQ'}`, param).then(res => {
-    this.$emit('onUpdate')
-    this.$message.success(res.msg)
-    setTimeout(() => {
-      this.$refs['popup'].spinnerLoading = false
-    }, 800)
-  }).catch(e => {
-    this.$message.error(e.msg || '')
-    this.$refs['popup'].spinnerLoading = false
-  })
-}).catch(e => {
-  this.$message.error(e.msg || '')
-})
+
+let $then = (res) => {
+  this.$emit('onUpdate')
+  this.$message.success(res.msg)
+  setTimeout(() => {
+    this.$refs['popup'].spinnerLoading = false
+  }, 800)
+}
+let $catch = (e) => {
+  this.$message.error(e.msg || '')
+  this.$refs['popup'].spinnerLoading = false
+}
+if (this.item) {
+  this.$refs['popup'].spinnerLoading = true
+  this.store.dispatch(`security/updateQueueQ`, param).then(res => {
+    $then(res)
+  }).catch(e => {
+    $catch(e)
+  })
+} else {
+  this._verifyName(param).then(() => {
+    this.$refs['popup'].spinnerLoading = true
+    this.store.dispatch(`security/createQueueQ`, param).then(res => {
+      $then(res)
+    }).catch(e => {
+      $catch(e)
+    })
+  }).catch(e => {
+    this.$message.error(e.msg || '')
+  })
+}
 },
 _verification(){

install.sh (115 changes)

@@ -47,8 +47,57 @@ mysqlUserName="xx"
 # mysql password
 mysqlPassword="xx"
+
+# conf/config/install_config.conf settings
+# install path; must not be the same as the current path (pwd)
+installPath="/data1_1T/escheduler"
+
+# deployment user
+deployUser="escheduler"
+
+# zk cluster
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# hosts to install on
+ips="ark0,ark1,ark2,ark3,ark4"
+
+# conf/config/run_config.conf settings
+# machines that run a Master
+masters="ark0,ark1"
+
+# machines that run a Worker
+workers="ark2,ark3,ark4"
+
+# machine that runs Alert
+alertServer="ark3"
+
+# machines that run the Api
+apiServers="ark1"
+
+# alert settings
+# mail protocol
+mailProtocol="SMTP"
+
+# mail server host
+mailServerHost="smtp.exmail.qq.com"
+
+# mail server port
+mailServerPort="25"
+
+# sender
+mailSender="xxxxxxxxxx"
+
+# sender password
+mailPassword="xxxxxxxxxx"
+
+# Excel download path
+xlsFilePath="/tmp/xls"
+
 # hadoop settings
+# whether to start hdfs: if so, set to true and configure the hadoop parameters below;
+# if not, set to false, in which case the following settings need no changes
+hdfsStartupSate="false"
+
 # namenode address; HA is supported, in which case core-site.xml and hdfs-site.xml must be placed in the conf directory
 namenodeFs="hdfs://mycluster:8020"
@@ -58,6 +107,8 @@ yarnHaIps="192.168.xx.xx,192.168.xx.xx"
 # for a single resourcemanager, configure just one host name; for resourcemanager HA the default is fine
 singleYarnIp="ark1"
+
+# hdfs root path; the owner of the root path must be the deployment user
+hdfsPath="/escheduler"
 
 # common settings
 # program path
@@ -69,17 +120,11 @@ downloadPath="/tmp/escheduler/download"
 # task execution path
 execPath="/tmp/escheduler/exec"
 
-# hdfs root path
-hdfsPath="/escheduler"
-
-# whether to start hdfs: true to start, false not to
-hdfsStartupSate="true"
-
 # SHELL environment variable path
-shellEnvPath="/opt/.escheduler_env.sh"
+shellEnvPath="$installPath/conf/env/.escheduler_env.sh"
 
 # Python environment variable path
-pythonEnvPath="/opt/escheduler_env.py"
+pythonEnvPath="$installPath/conf/env/escheduler_env.py"
 
 # resource file suffixes
 resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
@@ -87,11 +132,7 @@ resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
 # development state: if true, the wrapped SHELL script can be inspected under execPath; if false it is deleted as soon as execution finishes
 devState="true"
 
 # zk settings
-# zk cluster
-zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
-
 # zk root directory
 zkRoot="/escheduler"
@@ -168,7 +209,6 @@ workerMaxCupLoadAvg="10"
 # worker reserved memory, used to judge whether the machine still has execution capacity
 workerReservedMemory="1"
-
 # api settings
 # api service port
 apiServerPort="12345"
@@ -188,53 +228,6 @@ springMaxRequestSize="1024MB"
 # api max post request size
 apiMaxHttpPostSize="5000000"
 
-# alert settings
-# mail protocol
-mailProtocol="SMTP"
-
-# mail server host
-mailServerHost="smtp.exmail.qq.com"
-
-# mail server port
-mailServerPort="25"
-
-# sender
-mailSender="xxxxxxxxxx"
-
-# sender password
-mailPassword="xxxxxxxxxx"
-
-# Excel download path
-xlsFilePath="/opt/xls"
-
-# conf/config/install_config.conf settings
-# install path
-installPath="/data1_1T/escheduler"
-
-# deployment user
-deployUser="escheduler"
-
-# hosts to install on
-ips="ark0,ark1,ark2,ark3,ark4"
-
-# conf/config/run_config.conf settings
-# machines that run a Master
-masters="ark0,ark1"
-
-# machines that run a Worker
-workers="ark2,ark3,ark4"
-
-# machine that runs Alert
-alertServer="ark3"
-
-# machines that run the Api
-apiServers="ark1"
-
 # 1, replace files
 echo "1, replace files"
 sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
@@ -317,8 +310,6 @@ sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_config.conf
 sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf
-
-
 # 2, create directories
 echo "2, create directories"

monitor_server.py → script/monitor_server.py (0 changes; file moved)

script/scp_hosts.sh (2 changes)

@@ -5,8 +5,6 @@ workDir=`cd ${workDir};pwd`
 source $workDir/../conf/config/run_config.conf
 source $workDir/../conf/config/install_config.conf
-
-tar -zxvf $workDir/../EasyScheduler-1.0.0.tar.gz -C $installPath
 
 hostsArr=(${ips//,/ })
 for host in ${hostsArr[@]}
 do
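The removed note in the back-end doc said the hard-coded 1.0.0 in this tar line had to be replaced by hand before deploying; for older checkouts, a one-liner along these lines would have done it (the target version string is an example):

```sh
# illustrative: pin the tarball version referenced by an old scp_hosts.sh before deploying
version="1.0.1"   # example target version
sed -i "s#EasyScheduler-1\.0\.0\.tar\.gz#EasyScheduler-${version}.tar.gz#" script/scp_hosts.sh
```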
