mirror of https://gitee.com/dolphinscheduler/DolphinScheduler.git
synced 2024-12-04 13:17:50 +08:00

commit 81c39f1053: Merge remote-tracking branch 'upstream/dev' into dev
前端部署文档.md:

@@ -45,7 +45,7 @@ API_BASE = http://192.168.220.204:12345

-### 2. Automated deployment`
+### 2. Automated deployment

In the root directory of the `escheduler-ui` project, edit the install script: `vi install(线上环境).sh`
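The script asks for the frontend access port and the backend proxy address. A hypothetical sketch of the two lines an operator edits (the variable names are assumptions for illustration, not taken from the actual script; the API address reuses the `API_BASE` value shown in the hunk header):

```bash
#!/bin/bash
# Hypothetical excerpt of install(线上环境).sh; normally only these two
# values need editing before running the script.
frontend_port="8888"                          # port Nginx will serve the UI on
backend_api="http://192.168.220.204:12345"    # backend API address to proxy to
```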
后端部署文档.md:

@@ -6,7 +6,7 @@

* [Mysql](https://blog.csdn.net/u011886447/article/details/79796802) (5.5+): required
* [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html) (1.8+): required
* [ZooKeeper](https://www.jianshu.com/p/de90172ea680) (3.4.6): required
-* [Hadoop](https://blog.csdn.net/Evankaka/article/details/51612437) (2.7.3): optional; needed for the resource upload feature and MapReduce task submission (uploaded resource files are currently stored on HDFS)
+* [Hadoop](https://blog.csdn.net/Evankaka/article/details/51612437) (2.6+): optional; needed for the resource upload feature and MapReduce task submission (uploaded resource files are currently stored on HDFS)
* [Hive](https://staroon.pro/2017/12/09/HiveInstall/) (1.2.1): optional, needed for Hive task submission
* Spark (1.x, 2.x): optional, needed for Spark task submission
* PostgreSQL (8.2.15+): optional, needed for PostgreSQL stored procedures
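A few optional sanity checks for the required components, against the minimum versions listed above (host is an assumption; ZooKeeper's `stat` four-letter command prints its version):

```bash
# Verify the mandatory prerequisites are installed and at usable versions.
java -version                                # expect 1.8+
mysql --version                              # expect 5.5+
echo stat | nc localhost 2181 | head -n 1    # expect a 3.4.x ZooKeeper banner
```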
@@ -27,15 +27,6 @@

After a successful build, target/escheduler-{version}/ is generated in the current directory.

-```
-    bin
-    conf
-    lib
-    script
-    sql
-    install.sh
-```
-
- Description

```
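The build command itself is not part of this hunk; the invocation below is an assumption (a common Maven assembly build for this project), followed by a check against the layout the doc describes:

```bash
# Assumed build invocation; the doc only states that a successful build
# produces target/escheduler-{version}/ in the current directory.
mvn -U clean package assembly:assembly -Dmaven.test.skip=true
ls target/escheduler-*/    # expect: bin conf lib script sql install.sh
```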
@@ -74,7 +65,7 @@ mysql -h {host} -u {user} -p{password} -D {db} < quartz.sql

## Create the deployment user

-Because the escheduler worker runs jobs via sudo -u {linux-user}, the deployment user needs sudo privileges, without a password prompt.
+- Create the deployment user on every machine where the scheduler will be deployed. Because the worker service runs jobs via sudo -u {linux-user}, the deployment user needs sudo privileges, without a password prompt.

```部署账号
vi /etc/sudoers
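Putting the two steps from this hunk together: import both schema files (host and credentials below are the doc's placeholders), then confirm the deployment user really has passwordless sudo:

```bash
# Import the escheduler and quartz schemas, then test passwordless sudo.
mysql -h 192.168.xx.xx -u xx -pxx -D escheduler < escheduler.sql
mysql -h 192.168.xx.xx -u xx -pxx -D escheduler < quartz.sql
sudo -n true && echo "passwordless sudo OK"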
@@ -86,386 +77,73 @@ escheduler ALL=(ALL) NOPASSWD: NOPASSWD: ALL

#Default requiretty
```

-## Configuration files
-
-```
-Note: the configuration files live under target/escheduler-{version}/conf
-```
-
-### escheduler-alert
-
-Configures email alerting.
-
-* alert.properties
-
-```
-# a QQ mailbox is used as the example; adjust accordingly for other providers
-#alert type is EMAIL/SMS
-alert.type=EMAIL
-
-# mail server configuration
-mail.protocol=SMTP
-mail.server.host=smtp.exmail.qq.com
-mail.server.port=25
-mail.sender=xxxxxxx@qq.com
-mail.passwd=xxxxxxx
-
-# xls file path, need manually create it before use if not exist
-xls.file.path=/opt/xls
-```
-
-### escheduler-common
-
-General configuration: task queue selection and address, and common file directories.
-
-- common/common.properties
-
-```
-#task queue implementation, default "zookeeper"
-escheduler.queue.impl=zookeeper
-
-# user data directory path, self configuration, please make sure the directory exists and have read write permissions
-data.basedir.path=/tmp/escheduler
-
-# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
-data.download.basedir.path=/tmp/escheduler/download
-
-# process execute directory. self configuration, please make sure the directory exists and have read write permissions
-process.exec.basepath=/tmp/escheduler/exec
-
-# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/escheduler" is recommended
-data.store2hdfs.basepath=/escheduler
-
-# whether hdfs starts
-hdfs.startup.state=true
-
-# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
-escheduler.env.path=/opt/.escheduler_env.sh
-escheduler.env.py=/opt/escheduler_env.py
-
-#resource.view.suffixs
-resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
-
-# is development state? default "false"
-development.state=false
-```
-
-SHELL task environment variable configuration:
-
-```
-Note: the file lives under target/escheduler-{version}/conf/env; this is the environment the Worker loads when executing tasks
-```
-
-.escheduler_env.sh
-
-```
-export HADOOP_HOME=/opt/soft/hadoop
-export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
-export SPARK_HOME1=/opt/soft/spark1
-export SPARK_HOME2=/opt/soft/spark2
-export PYTHON_HOME=/opt/soft/python
-export JAVA_HOME=/opt/soft/java
-export HIVE_HOME=/opt/soft/hive
-
-export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH
-```
+## Passwordless SSH setup
+
+Configure passwordless SSH login on the deployment machine and the other installation machines; if the scheduler is to be installed on the deployment machine itself, configure passwordless SSH from that machine to itself
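The removed .escheduler_env.sh above is what the Worker sources before each task; a quick way to validate such a file (paths are the doc's /opt/soft examples, adjust to your layout):

```bash
# Source the worker environment file and confirm the tools now resolve.
source /opt/.escheduler_env.sh
command -v java && command -v hive    # should point into /opt/soft/...
```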
-Python task environment variable configuration:
-
-```
-Note: the file lives under target/escheduler-{version}/conf/env
-```
-
-escheduler_env.py
-
-```
-import os
-
-HADOOP_HOME="/opt/soft/hadoop"
-SPARK_HOME1="/opt/soft/spark1"
-SPARK_HOME2="/opt/soft/spark2"
-PYTHON_HOME="/opt/soft/python"
-JAVA_HOME="/opt/soft/java"
-HIVE_HOME="/opt/soft/hive"
-PATH=os.environ['PATH']
-PATH="%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s/bin:%s"%(HIVE_HOME,HADOOP_HOME,SPARK_HOME1,SPARK_HOME2,JAVA_HOME,PYTHON_HOME,PATH)
-
-os.putenv('PATH','%s'%PATH)
-```
-
-hadoop configuration file:
-
-- common/hadoop/hadoop.properties
-
-```
-# ha or single namenode; for namenode ha, core-site.xml and hdfs-site.xml must be copied to the conf directory
-fs.defaultFS=hdfs://mycluster:8020
-
-#resourcemanager ha note this need ips , this empty if single
-yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
-
-# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
-yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
-```
-
-Timer (Quartz) configuration file:
-
-- quartz.properties
-
-```
-#============================================================================
-# Configure Main Scheduler Properties
-#============================================================================
-org.quartz.scheduler.instanceName = EasyScheduler
-org.quartz.scheduler.instanceId = AUTO
-org.quartz.scheduler.makeSchedulerThreadDaemon = true
-org.quartz.jobStore.useProperties = false
-
-#============================================================================
-# Configure ThreadPool
-#============================================================================
-
-org.quartz.threadPool.class = org.quartz.simpl.SimpleThreadPool
-org.quartz.threadPool.makeThreadsDaemons = true
-org.quartz.threadPool.threadCount = 25
-org.quartz.threadPool.threadPriority = 5
-
-#============================================================================
-# Configure JobStore
-#============================================================================
-
-org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
-org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
-org.quartz.jobStore.tablePrefix = QRTZ_
-org.quartz.jobStore.isClustered = true
-org.quartz.jobStore.misfireThreshold = 60000
-org.quartz.jobStore.clusterCheckinInterval = 5000
-org.quartz.jobStore.dataSource = myDs
-
-#============================================================================
-# Configure Datasources
-#============================================================================
-
-org.quartz.dataSource.myDs.driver = com.mysql.jdbc.Driver
-org.quartz.dataSource.myDs.URL = jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=utf8&useSSL=false
-org.quartz.dataSource.myDs.user = xx
-org.quartz.dataSource.myDs.password = xx
-org.quartz.dataSource.myDs.maxConnections = 10
-org.quartz.dataSource.myDs.validationQuery = select 1
-```
-
-zookeeper configuration file:
-
-- zookeeper.properties
-
-```
-#zookeeper cluster
-zookeeper.quorum=192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
-
-#escheduler root directory
-zookeeper.escheduler.root=/escheduler
-
-#zookeeper server directory
-zookeeper.escheduler.dead.servers=/escheduler/dead-servers
-zookeeper.escheduler.masters=/escheduler/masters
-zookeeper.escheduler.workers=/escheduler/workers
-
-#zookeeper lock directory
-zookeeper.escheduler.lock.masters=/escheduler/lock/masters
-zookeeper.escheduler.lock.workers=/escheduler/lock/workers
-
-#escheduler failover directory
-zookeeper.escheduler.lock.masters.failover=/escheduler/lock/failover/masters
-zookeeper.escheduler.lock.workers.failover=/escheduler/lock/failover/workers
-
-#zookeeper session and connection settings
-zookeeper.session.timeout=300
-zookeeper.connection.timeout=300
-zookeeper.retry.sleep=1000
-zookeeper.retry.maxtime=5
-```
+- [Set up passwordless SSH from the **main machine** to every other machine](http://geek.analysys.cn/topic/113)
+
+## Deployment
+
+### 1. Change the permissions of the install directory
+
+- The install directory looks like this:
+
+```
+    bin
+    conf
+    install.sh
+    lib
+    script
+    sql
+```
+
+- Change ownership (replace deployUser with the actual deployment user):
+
+  `sudo chown -R deployUser:deployUser *`
+
+### 2. Edit the environment variable files
+
+- According to your needs, edit the environment variables in the two files **escheduler_env.py** and **.escheduler_env.sh** under conf/env/
+
+### 3. Edit the deployment parameters
+
+- Edit the parameters in **install.sh**, replacing them with the values your environment needs
+
+- If you use hdfs-related features, copy **hdfs-site.xml** and **core-site.xml** into the conf directory
+
+### 4. One-click deployment
+
+- Install the zookeeper client library:
+
+  `pip install kazoo`
+
+- Switch to the deployment user and deploy:
+
+  `sh install.sh`
+
+- Use jps to check whether the services started:
+
+```aidl
+    MasterServer ----- master service
+    WorkerServer ----- worker service
+    LoggerServer ----- logger service
+    ApiApplicationServer ----- api service
+    AlertServer ----- alert service
+```
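The added one-click deployment steps end with a jps check; a sketch of that verification (ZooKeeper address as configured in zkQuorum; `ruok`/`imok` is ZooKeeper's standard liveness probe):

```bash
# After `sh install.sh`, confirm all five services are up and ZooKeeper answers.
jps | grep -E 'MasterServer|WorkerServer|LoggerServer|ApiApplicationServer|AlertServer'
echo ruok | nc 192.168.xx.xx 2181    # expect: imok
```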
-### escheduler-dao
-
-dao data source configuration:
-
-- dao/data_source.properties
-
-```
-# base spring data source configuration
-spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
-spring.datasource.driver-class-name=com.mysql.jdbc.Driver
-spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/escheduler?characterEncoding=UTF-8
-spring.datasource.username=xx
-spring.datasource.password=xx
-
-# connection configuration
-spring.datasource.initialSize=5
-# min connection number
-spring.datasource.minIdle=5
-# max connection number
-spring.datasource.maxActive=50
-
-# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
-# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
-spring.datasource.maxWait=60000
-
-# milliseconds for check to close free connections
-spring.datasource.timeBetweenEvictionRunsMillis=60000
-
-# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
-spring.datasource.timeBetweenConnectErrorMillis=60000
-
-# the longest time a connection remains idle without being evicted, in milliseconds
-spring.datasource.minEvictableIdleTimeMillis=300000
-
-#the SQL used to check whether the connection is valid requires a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
-spring.datasource.validationQuery=SELECT 1
-#check whether the connection is valid for timeout, in seconds
-spring.datasource.validationQueryTimeout=3
-
-# when applying for a connection, if it is detected that the connection is idle longer than timeBetweenEvictionRunsMillis,
-# validationQuery is performed to check whether the connection is valid
-spring.datasource.testWhileIdle=true
-
-#execute validation to check if the connection is valid when applying for a connection
-spring.datasource.testOnBorrow=true
-#execute validation to check if the connection is valid when the connection is returned
-spring.datasource.testOnReturn=false
-spring.datasource.defaultAutoCommit=true
-spring.datasource.keepAlive=true
-
-# open PSCache, specify count PSCache for every connection
-spring.datasource.poolPreparedStatements=true
-spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
-```
-
-### escheduler-server
-
-master configuration file:
-
-- master.properties
-
-```
-# master execute thread num
-master.exec.threads=100
-
-# master execute task number in parallel
-master.exec.task.number=20
-
-# master heartbeat interval
-master.heartbeat.interval=10
-
-# master commit task retry times
-master.task.commit.retryTimes=5
-
-# master commit task interval
-master.task.commit.interval=100
-
-# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
-master.max.cpuload.avg=10
-
-# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
-master.reserved.memory=1
-```
-
-worker configuration file:
-
-- worker.properties
-
-```
-# worker execute thread num
-worker.exec.threads=100
-
-# worker heartbeat interval
-worker.heartbeat.interval=10
-
-# submit the number of tasks at a time
-worker.fetch.task.num = 10
-
-# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
-worker.max.cpuload.avg=10
-
-# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
-worker.reserved.memory=1
-```
-
-### escheduler-api
-
-web configuration file:
-
-- application.properties
-
-```
-# server port
-server.port=12345
-
-# session config
-server.session.timeout=7200
-
-server.context-path=/escheduler/
-
-# file size limit for upload
-spring.http.multipart.max-file-size=1024MB
-spring.http.multipart.max-request-size=1024MB
-
-# post content
-server.max-http-post-size=5000000
-```
-
-## Pseudo-distributed deployment
-
-### 1. Create the deployment user
-
-As in **Create the deployment user** above
-
-### 2. Create the HDFS root path as needed
-
-The **hdfs.startup.state** setting in **common/common.properties** decides whether HDFS is enabled; if it is, create the HDFS root path and change its **owner** to the **deployment user**, otherwise skip this step
-
-### 3. Build the project
-
-Build the project as in **Build the project** above
-
-### 4. Edit the configuration files
-
-Edit the configuration files and the **environment variable** files according to **Configuration files** above
-
-### 5. Create directories and copy the environment variable files to the configured locations
-
-- Create the data.basedir.path, data.download.basedir.path, and process.exec.basepath paths configured in **common/common.properties**
-
-- Copy the two environment variable files **.escheduler_env.sh** and **escheduler_env.py** to the directories configured by **escheduler.env.path** and **escheduler.env.py** in **common/common.properties**, and change their **owner** to the **deployment user**
-
-### 6. Start and stop services
+## Viewing logs
+
+Logs are collected in a single directory
+
+```日志路径
+ logs/
+    ├── escheduler-alert-server.log
+    ├── escheduler-master-server.log
+    |—— escheduler-worker-server.log
+    |—— escheduler-api-server.log
+    |—— escheduler-logger-server.log
+```
+
+## Start and stop services

* Start/stop Master
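Both the new text and the removed sections use the escheduler-daemon.sh start/stop pattern; paired with the log layout listed above (alert-server is the service shown in this diff; the others follow the same naming):

```bash
# Start a service, follow its log, then stop it again.
sh ./bin/escheduler-daemon.sh start alert-server
tail -f logs/escheduler-alert-server.log
sh ./bin/escheduler-daemon.sh stop alert-server
```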
@@ -500,68 +178,3 @@ sh ./bin/escheduler-daemon.sh start alert-server

sh ./bin/escheduler-daemon.sh stop alert-server
```

-
-## Distributed deployment
-
-### 1. Create the deployment user
-
-- On every machine where the scheduler is to be deployed, create the deployment user as in **Create the deployment user** above
-- [Set up passwordless SSH from the **main machine** to every other machine](https://blog.csdn.net/thinkmore1314/article/details/22489203)
-
-### 2. Create the HDFS root path as needed
-
-The **hdfs.startup.state** setting in **common/common.properties** decides whether HDFS is enabled; if it is, create the HDFS root path and change its **owner** to the **deployment user**, otherwise skip this step
-
-### 3. Build the project
-
-Build the project as in **Build the project** above
-
-### 4. Copy the environment variable files to the configured locations
-
-Copy the two environment variable files **.escheduler_env.sh** and **escheduler_env.py** to the directories configured by **escheduler.env.path** and **escheduler.env.py** in **common/common.properties**, and change their **owner** to the **deployment user**
-
-### 5. Edit install.sh
-
-Edit the variable values in install.sh, replacing them with the values your environment needs
-
-### 6. One-click deployment
-
-- Install: pip install kazoo
-- The installation directory looks like this:
-
-```
-    bin
-    conf
-    escheduler-1.0.0-SNAPSHOT.tar.gz
-    install.sh
-    lib
-    monitor_server.py
-    script
-    sql
-```
-
-- As the deployment user, run sh install.sh for one-click deployment
-  - Note: the version number (1.0.0) in `tar -zxvf $workDir/../escheduler-1.0.0.tar.gz -C $installPath` inside scp_hosts.sh must be replaced manually with the actual version before running
-
-## Service monitoring
-
-The monitor_server.py script watches the master and worker services and restarts them if they die
-
-Note: start it only after all services are up
-
-nohup python -u monitor_server.py > nohup.out 2>&1 &
-
-## Viewing logs
-
-Logs are collected in a single directory
-
-```日志路径
- logs/
-    ├── escheduler-alert-server.log
-    ├── escheduler-master-server.log
-    |—— escheduler-worker-server.log
-    |—— escheduler-api-server.log
-    |—— escheduler-logger-server.log
-```
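The removed section's watchdog instructions in runnable form: start monitor_server.py only after every service is up, then follow its output (the nohup line is taken from the doc; the tail is just a convenient way to watch for restart events):

```bash
# Start the master/worker watchdog from the install directory and watch it.
nohup python -u monitor_server.py > nohup.out 2>&1 &
tail -f nohup.out
```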
Vue component:

@@ -66,21 +66,39 @@
      if (this.item) {
        param.id = this.item.id
      }
-      this._verifyName(param).then(() => {
+
+      let $then = (res) => {
+        this.$emit('onUpdate')
+        this.$message.success(res.msg)
+        setTimeout(() => {
+          this.$refs['popup'].spinnerLoading = false
+        }, 800)
+      }
+
+      let $catch = (e) => {
+        this.$message.error(e.msg || '')
+        this.$refs['popup'].spinnerLoading = false
+      }
+
+      if (this.item) {
        this.$refs['popup'].spinnerLoading = true
-        this.store.dispatch(`security/${this.item ? 'updateQueueQ' : 'createQueueQ'}`, param).then(res => {
-          this.$emit('onUpdate')
-          this.$message.success(res.msg)
-          setTimeout(() => {
-            this.$refs['popup'].spinnerLoading = false
-          }, 800)
+        this.store.dispatch(`security/updateQueueQ`, param).then(res => {
+          $then(res)
        }).catch(e => {
-          this.$message.error(e.msg || '')
-          this.$refs['popup'].spinnerLoading = false
+          $catch(e)
        })
-      }).catch(e => {
-        this.$message.error(e.msg || '')
-      })
+      } else {
+        this._verifyName(param).then(() => {
+          this.$refs['popup'].spinnerLoading = true
+          this.store.dispatch(`security/createQueueQ`, param).then(res => {
+            $then(res)
+          }).catch(e => {
+            $catch(e)
+          })
+        }).catch(e => {
+          this.$message.error(e.msg || '')
+        })
+      }
    },
    _verification(){
install.sh:
@@ -47,8 +47,57 @@ mysqlUserName="xx"

# mysql password
mysqlPassword="xx"

+# conf/config/install_config.conf settings
+# install path; must not be the same as the current directory (pwd)
+installPath="/data1_1T/escheduler"
+
+# deployment user
+deployUser="escheduler"
+
+# zk cluster
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# hosts to install on
+ips="ark0,ark1,ark2,ark3,ark4"
+
+# conf/config/run_config.conf settings
+# machines that run the Master
+masters="ark0,ark1"
+
+# machines that run the Worker
+workers="ark2,ark3,ark4"
+
+# machine that runs Alert
+alertServer="ark3"
+
+# machines that run the Api
+apiServers="ark1"
+
+# alert settings
+# mail protocol
+mailProtocol="SMTP"
+
+# mail server host
+mailServerHost="smtp.exmail.qq.com"
+
+# mail server port
+mailServerPort="25"
+
+# sender
+mailSender="xxxxxxxxxx"
+
+# sender password
+mailPassword="xxxxxxxxxx"
+
+# Excel download path
+xlsFilePath="/tmp/xls"
+
# hadoop settings
+# whether to enable hdfs; if true, the hadoop parameters below must be configured;
+# if false, the settings below need no changes
+hdfsStartupSate="false"
+
# namenode address; HA is supported, in which case core-site.xml and hdfs-site.xml must be placed in the conf directory
namenodeFs="hdfs://mycluster:8020"
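install.sh later writes these values into conf/config/*.conf via sed (the replace step appears further down in this diff); a quick way to eyeball the result after running the script:

```bash
# Confirm the edited values actually landed in the generated config files.
grep -E '^(installPath|deployUser|ips)=' conf/config/install_config.conf
grep -E '^(masters|workers|alertServer|apiServers)=' conf/config/run_config.conf
```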
@@ -58,6 +107,8 @@ yarnHaIps="192.168.xx.xx,192.168.xx.xx"

# for a single resourcemanager, only one hostname needs to be configured; for resourcemanager HA the default is fine
singleYarnIp="ark1"

+# hdfs root path; its owner must be the deployment user
+hdfsPath="/escheduler"
+
# common settings
# program path
@@ -69,17 +120,11 @@ downloadPath="/tmp/escheduler/download"

# task execution path
execPath="/tmp/escheduler/exec"

-# hdfs root path
-hdfsPath="/escheduler"
-
-# whether to enable hdfs: true to enable, false to disable
-hdfsStartupSate="true"
-
# SHELL environment variables path
-shellEnvPath="/opt/.escheduler_env.sh"
+shellEnvPath="$installPath/conf/env/.escheduler_env.sh"

# Python environment variables path
-pythonEnvPath="/opt/escheduler_env.py"
+pythonEnvPath="$installPath/conf/env/escheduler_env.py"

# resource file suffixes
resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
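With the new defaults the environment files live inside the install tree rather than under /opt, so they can be sourced relative to installPath; a minimal sketch (installPath as configured above):

```bash
# Source the worker shell environment from the install tree.
installPath="/data1_1T/escheduler"
source "$installPath/conf/env/.escheduler_env.sh"
```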
@@ -87,11 +132,7 @@ resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"

# development state: if true, the wrapped SHELL script for a task can be inspected under execPath; if false, it is deleted as soon as execution finishes
devState="true"

# zk settings
-# zk cluster
-zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
-
# zk root directory
zkRoot="/escheduler"
@@ -168,7 +209,6 @@ workerMaxCupLoadAvg="10"

# worker reserved memory, used to judge whether the worker still has execution capacity
workerReservedMemory="1"

-
# api settings
# api server port
apiServerPort="12345"
@@ -188,53 +228,6 @@ springMaxRequestSize="1024MB"

# api max http post size
apiMaxHttpPostSize="5000000"

-
-# alert settings
-# mail protocol
-mailProtocol="SMTP"
-
-# mail server host
-mailServerHost="smtp.exmail.qq.com"
-
-# mail server port
-mailServerPort="25"
-
-# sender
-mailSender="xxxxxxxxxx"
-
-# sender password
-mailPassword="xxxxxxxxxx"
-
-# Excel download path
-xlsFilePath="/opt/xls"
-
-# conf/config/install_config.conf settings
-# install path
-installPath="/data1_1T/escheduler"
-
-# deployment user
-deployUser="escheduler"
-
-# hosts to install on
-ips="ark0,ark1,ark2,ark3,ark4"
-
-# conf/config/run_config.conf settings
-# machines that run the Master
-masters="ark0,ark1"
-
-# machines that run the Worker
-workers="ark2,ark3,ark4"
-
-# machine that runs Alert
-alertServer="ark3"
-
-# machines that run the Api
-apiServers="ark1"
-
# 1. replace files
echo "1,替换文件"
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${mysqlHost}/${mysqlDb}?characterEncoding=UTF-8#g" conf/dao/data_source.properties
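The same in-place sed pattern works for any key=value line in the generated configs; one more illustrative instance (my reading of ${txt} is an assumption: empty on GNU sed, while BSD/macOS sed requires a suffix argument after -i):

```bash
# Rewrite a single key in place, exactly like the sed calls above.
sed -i ${txt} "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
```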
@@ -317,8 +310,6 @@ sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/run_config.conf

sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/run_config.conf

-
-
# 2. create directories
echo "2,创建目录"
scp_hosts.sh:

@@ -5,8 +5,6 @@ workDir=`cd ${workDir};pwd`

source $workDir/../conf/config/run_config.conf
source $workDir/../conf/config/install_config.conf

-tar -zxvf $workDir/../EasyScheduler-1.0.0.tar.gz -C $installPath
-
hostsArr=(${ips//,/ })
for host in ${hostsArr[@]}
do
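An illustrative body for the hosts loop above (the actual body lies outside this hunk, so this is a sketch only): it would push the unpacked install tree to every host listed in $ips, with $installPath coming from the sourced install_config.conf:

```bash
# Hypothetical per-host copy step matching the loop header shown above.
for host in ${hostsArr[@]}
do
  ssh "$host" "mkdir -p $installPath"
  scp -r $installPath/* "$host:$installPath"
done
```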