mirror of https://gitee.com/dolphinscheduler/DolphinScheduler.git
synced 2024-12-04 21:28:00 +08:00

commit 336a7f346c: Merge remote-tracking branch 'remote/dev' into dev

164 lines  ambari_plugin/common-services/DOLPHIN/1.3.0/alerts.json  (new file)
@@ -0,0 +1,164 @@
{
  "DOLPHIN": {
    "service": [],
    "DOLPHIN_API": [
      {
        "name": "dolphin_api_port_check",
        "label": "dolphin_api_port_check",
        "description": "dolphin_api_port_check.",
        "interval": 10,
        "scope": "ANY",
        "source": {
          "type": "PORT",
          "uri": "{{dolphin-application-api/server.port}}",
          "default_port": 12345,
          "reporting": {
            "ok": {
              "text": "TCP OK - {0:.3f}s response on port {1}"
            },
            "warning": {
              "text": "TCP OK - {0:.3f}s response on port {1}",
              "value": 1.5
            },
            "critical": {
              "text": "Connection failed: {0} to {1}:{2}",
              "value": 5.0
            }
          }
        }
      }
    ],
    "DOLPHIN_MASTER": [
      {
        "name": "DOLPHIN_MASTER_CHECK",
        "label": "check dolphin scheduler master status",
        "description": "",
        "interval": 10,
        "scope": "HOST",
        "enabled": true,
        "source": {
          "type": "SCRIPT",
          "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
          "parameters": [
            {
              "name": "connection.timeout",
              "display_name": "Connection Timeout",
              "value": 5.0,
              "type": "NUMERIC",
              "description": "The maximum time before this alert is considered to be CRITICAL",
              "units": "seconds",
              "threshold": "CRITICAL"
            },
            {
              "name": "alertName",
              "display_name": "alertName",
              "value": "DOLPHIN_MASTER",
              "type": "STRING",
              "description": "alert name"
            }
          ]
        }
      }
    ],
    "DOLPHIN_WORKER": [
      {
        "name": "DOLPHIN_WORKER_CHECK",
        "label": "check dolphin scheduler worker status",
        "description": "",
        "interval": 10,
        "scope": "HOST",
        "enabled": true,
        "source": {
          "type": "SCRIPT",
          "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
          "parameters": [
            {
              "name": "connection.timeout",
              "display_name": "Connection Timeout",
              "value": 5.0,
              "type": "NUMERIC",
              "description": "The maximum time before this alert is considered to be CRITICAL",
              "units": "seconds",
              "threshold": "CRITICAL"
            },
            {
              "name": "alertName",
              "display_name": "alertName",
              "value": "DOLPHIN_WORKER",
              "type": "STRING",
              "description": "alert name"
            }
          ]
        }
      }
    ],
    "DOLPHIN_ALERT": [
      {
        "name": "DOLPHIN_DOLPHIN_ALERT_CHECK",
        "label": "check dolphin scheduler alert status",
        "description": "",
        "interval": 10,
        "scope": "HOST",
        "enabled": true,
        "source": {
          "type": "SCRIPT",
          "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
          "parameters": [
            {
              "name": "connection.timeout",
              "display_name": "Connection Timeout",
              "value": 5.0,
              "type": "NUMERIC",
              "description": "The maximum time before this alert is considered to be CRITICAL",
              "units": "seconds",
              "threshold": "CRITICAL"
            },
            {
              "name": "alertName",
              "display_name": "alertName",
              "value": "DOLPHIN_ALERT",
              "type": "STRING",
              "description": "alert name"
            }
          ]
        }
      }
    ],
    "DOLPHIN_LOGGER": [
      {
        "name": "DOLPHIN_DOLPHIN_LOGGER_CHECK",
        "label": "check dolphin scheduler logger status",
        "description": "",
        "interval": 10,
        "scope": "HOST",
        "enabled": true,
        "source": {
          "type": "SCRIPT",
          "path": "DOLPHIN/1.3.0/package/alerts/alert_dolphin_scheduler_status.py",
          "parameters": [
            {
              "name": "connection.timeout",
              "display_name": "Connection Timeout",
              "value": 5.0,
              "type": "NUMERIC",
              "description": "The maximum time before this alert is considered to be CRITICAL",
              "units": "seconds",
              "threshold": "CRITICAL"
            },
            {
              "name": "alertName",
              "display_name": "alertName",
              "value": "DOLPHIN_LOGGER",
              "type": "STRING",
              "description": "alert name"
            }
          ]
        }
      }
    ]
  }
}

Note: the original hunk declared this group under a second, duplicate "DOLPHIN_ALERT" key; since its check targets the logger server, the key is corrected to "DOLPHIN_LOGGER" so the alert-server check is not silently overwritten during JSON parsing.
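The PORT alert above is evaluated by Ambari itself: it times a TCP connect against the declared port and compares the elapsed time with the warning/critical values under "reporting". A minimal Python sketch of those semantics (the real check lives inside ambari-agent; this function is illustrative only):

import socket
import time

def check_port(host, port=12345, warn=1.5, crit=5.0):
    # warn/crit mirror the 1.5 s and 5.0 s thresholds declared above
    start = time.time()
    try:
        sock = socket.create_connection((host, port), timeout=crit)
        sock.close()
    except (socket.error, socket.timeout) as e:
        return "CRITICAL", "Connection failed: {0} to {1}:{2}".format(e, host, port)
    elapsed = time.time() - start
    state = "WARNING" if elapsed >= warn else "OK"
    return state, "TCP OK - {0:.3f}s response on port {1}".format(elapsed, port)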
@@ -0,0 +1,143 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>alert.type</name>
        <value>EMAIL</value>
        <description>alert type is EMAIL/SMS</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>alert.template</name>
        <value>html</value>
        <description>alert msg template, default is html template</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.protocol</name>
        <value>SMTP</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.server.host</name>
        <value>xxx.xxx.com</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.server.port</name>
        <value>25</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.sender</name>
        <value>admin</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.user</name>
        <value>admin</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.passwd</name>
        <value>000000</value>
        <description></description>
        <property-type>PASSWORD</property-type>
        <value-attributes>
            <type>password</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>mail.smtp.starttls.enable</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.smtp.ssl.enable</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mail.smtp.ssl.trust</name>
        <value>xxx.xxx.com</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>enterprise.wechat.enable</name>
        <value>false</value>
        <description></description>
        <value-attributes>
            <type>value-list</type>
            <entries>
                <entry>
                    <value>true</value>
                    <label>Enabled</label>
                </entry>
                <entry>
                    <value>false</value>
                    <label>Disabled</label>
                </entry>
            </entries>
            <selection-cardinality>1</selection-cardinality>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>enterprise.wechat.corp.id</name>
        <value>wechatId</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>enterprise.wechat.secret</name>
        <value>secret</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>enterprise.wechat.agent.id</name>
        <value>agentId</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>enterprise.wechat.users</name>
        <value>wechatUsers</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
</configuration>
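The mail.* properties above are consumed by DolphinScheduler's alert server (Java). Purely to illustrate what the two TLS flags control, here is a hedged Python sketch of the equivalent SMTP session setup, using the placeholder values from the config:

import smtplib

host, port = "xxx.xxx.com", 25            # mail.server.host / mail.server.port
user, passwd = "admin", "000000"          # mail.user / mail.passwd
ssl_enable, starttls_enable = True, True  # mail.smtp.ssl.enable / mail.smtp.starttls.enable

if ssl_enable:
    # implicit TLS from the first byte; STARTTLS is irrelevant on such a link
    server = smtplib.SMTP_SSL(host, port)
else:
    server = smtplib.SMTP(host, port)
    if starttls_enable:
        server.starttls()                 # upgrade the plain connection to TLS
server.login(user, passwd)
server.quit()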
@@ -0,0 +1,87 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>server.port</name>
        <value>12345</value>
        <description>server port</description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
    </property>
    <property>
        <name>server.servlet.session.timeout</name>
        <value>7200</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>server.servlet.context-path</name>
        <value>/dolphinscheduler/</value>
        <description></description>
    </property>
    <property>
        <name>spring.servlet.multipart.max-file-size</name>
        <value>1024</value>
        <value-attributes>
            <unit>MB</unit>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>spring.servlet.multipart.max-request-size</name>
        <value>1024</value>
        <value-attributes>
            <unit>MB</unit>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>server.jetty.max-http-post-size</name>
        <value>5000000</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>spring.messages.encoding</name>
        <value>UTF-8</value>
        <description></description>
    </property>
    <property>
        <name>spring.messages.basename</name>
        <value>i18n/messages</value>
        <description></description>
    </property>
    <property>
        <name>security.authentication.type</name>
        <value>PASSWORD</value>
        <description></description>
    </property>
</configuration>
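This config type maps one key per line onto DolphinScheduler's application-api.properties. A sketch of that rendering (the dump-to-file step below is an assumption about what the service scripts do; the properties themselves are verbatim from the config above):

props = {
    "server.port": "12345",
    "server.servlet.session.timeout": "7200",
    "server.servlet.context-path": "/dolphinscheduler/",
    "spring.servlet.multipart.max-file-size": "1024",    # declared unit: MB
    "spring.servlet.multipart.max-request-size": "1024",  # declared unit: MB
    "server.jetty.max-http-post-size": "5000000",
    "spring.messages.encoding": "UTF-8",
    "spring.messages.basename": "i18n/messages",
    "security.authentication.type": "PASSWORD",
}
with open("application-api.properties", "w") as f:  # file name is an assumption
    for name, value in sorted(props.items()):
        f.write("{0}={1}\n".format(name, value))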
@@ -0,0 +1,158 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>resource.storage.type</name>
        <display-name>Choose Resource Upload Startup Type</display-name>
        <description>
            Resource upload startup type: HDFS, S3, NONE
        </description>
        <value>NONE</value>
        <value-attributes>
            <type>value-list</type>
            <entries>
                <entry>
                    <value>HDFS</value>
                    <label>HDFS</label>
                </entry>
                <entry>
                    <value>S3</value>
                    <label>S3</label>
                </entry>
                <entry>
                    <value>NONE</value>
                    <label>NONE</label>
                </entry>
            </entries>
            <selection-cardinality>1</selection-cardinality>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>resource.upload.path</name>
        <value>/dolphinscheduler</value>
        <description>
            Resource storage path on HDFS/S3; resource files are stored under this path. Please make sure the directory exists on HDFS and has read/write permissions. "/dolphinscheduler" is recommended.
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>data.basedir.path</name>
        <value>/tmp/dolphinscheduler</value>
        <description>
            Local directory path for user data; please make sure the directory exists and has read/write permissions.
        </description>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>hadoop.security.authentication.startup.state</name>
        <value>false</value>
        <value-attributes>
            <type>value-list</type>
            <entries>
                <entry>
                    <value>true</value>
                    <label>Enabled</label>
                </entry>
                <entry>
                    <value>false</value>
                    <label>Disabled</label>
                </entry>
            </entries>
            <selection-cardinality>1</selection-cardinality>
        </value-attributes>
        <description>whether kerberos is enabled at startup</description>
    </property>
    <property>
        <name>java.security.krb5.conf.path</name>
        <value>/opt/krb5.conf</value>
        <description>
            java.security.krb5.conf path
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>login.user.keytab.username</name>
        <value>hdfs-mycluster@ESZ.COM</value>
        <description>
            LoginUserFromKeytab user
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>login.user.keytab.path</name>
        <value>/opt/hdfs.headless.keytab</value>
        <description>
            LoginUserFromKeytab path
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>resource.view.suffixs</name>
        <value>txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties</value>
        <description></description>
    </property>
    <property>
        <name>hdfs.root.user</name>
        <value>hdfs</value>
        <description>
            Users who have permission to create directories under the HDFS root path
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://mycluster:8020</value>
        <description>
            HA or single namenode. For namenode HA, copy core-site.xml and hdfs-site.xml to the conf directory. S3 is also supported, for example: s3a://dolphinscheduler
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>fs.s3a.endpoint</name>
        <value>http://host:9010</value>
        <description>
            Required when S3 is used: the S3 endpoint
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>fs.s3a.access.key</name>
        <value>A3DXS30FO22544RE</value>
        <description>
            Required when S3 is used: the S3 access key
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>fs.s3a.secret.key</name>
        <value>OloCLq3n+8+sdPHUhJ21XrSxTC+JK</value>
        <description>
            Required when S3 is used: the S3 secret key
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>kerberos.expire.time</name>
        <value>7</value>
        <description></description>
    </property>

</configuration>
@@ -0,0 +1,467 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>spring.datasource.initialSize</name>
        <value>5</value>
        <description>
            Init connection number
        </description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.minIdle</name>
        <value>5</value>
        <description>
            Min connection number
        </description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.maxActive</name>
        <value>50</value>
        <description>
            Max connection number
        </description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.maxWait</name>
        <value>60000</value>
        <description>
            Max wait time to get a connection, in milliseconds.
            When maxWait is configured, fair locks are enabled by default and concurrency efficiency decreases.
            If necessary, unfair locks can be used by setting the useUnfairLock attribute to true.
        </description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.timeBetweenEvictionRunsMillis</name>
        <value>60000</value>
        <description>
            Interval, in milliseconds, between checks that close idle connections
        </description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.timeBetweenConnectErrorMillis</name>
        <value>60000</value>
        <description>
            Interval, in milliseconds, at which the destroy thread checks connections and closes a physical connection
            if its idle time is greater than or equal to minEvictableIdleTimeMillis.
        </description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.minEvictableIdleTimeMillis</name>
        <value>300000</value>
        <description>
            The longest time a connection may remain idle without being evicted, in milliseconds
        </description>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.validationQuery</name>
        <value>SELECT 1</value>
        <description>
            The SQL used to check whether a connection is valid; it must be a query statement.
            If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.validationQueryTimeout</name>
        <value>3</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>
            Timeout for the connection validity check, in seconds
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.testWhileIdle</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description>
            When borrowing a connection, if it is detected that the connection has been idle longer than
            timeBetweenEvictionRunsMillis, validationQuery is run to check whether the connection is valid
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.testOnBorrow</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description>
            Run validation to check whether the connection is valid when borrowing a connection
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.testOnReturn</name>
        <value>false</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description>
            Run validation to check whether the connection is valid when a connection is returned
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.defaultAutoCommit</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.keepAlive</name>
        <value>false</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>spring.datasource.poolPreparedStatements</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description>
            Enable PSCache and specify the PSCache size for every connection
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.maxPoolPreparedStatementPerConnectionSize</name>
        <value>20</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <!-- the original hunk duplicated the "spring.datasource." prefix in this name -->
        <name>spring.datasource.filters</name>
        <value>stat,wall,log4j</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>spring.datasource.connectionProperties</name>
        <value>druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>mybatis-plus.mapper-locations</name>
        <value>classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.typeEnumsPackage</name>
        <value>org.apache.dolphinscheduler.*.enums</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.typeAliasesPackage</name>
        <value>org.apache.dolphinscheduler.dao.entity</value>
        <description>
            Entity scan; multiple packages are separated by a comma or semicolon
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.global-config.db-config.id-type</name>
        <value>AUTO</value>
        <value-attributes>
            <type>value-list</type>
            <entries>
                <entry>
                    <value>AUTO</value>
                    <label>AUTO</label>
                </entry>
                <entry>
                    <value>INPUT</value>
                    <label>INPUT</label>
                </entry>
                <entry>
                    <value>ID_WORKER</value>
                    <label>ID_WORKER</label>
                </entry>
                <entry>
                    <value>UUID</value>
                    <label>UUID</label>
                </entry>
            </entries>
            <selection-cardinality>1</selection-cardinality>
        </value-attributes>
        <description>
            Primary key type. AUTO: database auto-increment ID; INPUT: user-supplied ID;
            ID_WORKER: globally unique numeric ID; UUID: globally unique UUID
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.global-config.db-config.field-strategy</name>
        <value>NOT_NULL</value>
        <value-attributes>
            <type>value-list</type>
            <entries>
                <entry>
                    <value>IGNORED</value>
                    <label>IGNORED</label>
                </entry>
                <entry>
                    <value>NOT_NULL</value>
                    <label>NOT_NULL</label>
                </entry>
                <entry>
                    <value>NOT_EMPTY</value>
                    <label>NOT_EMPTY</label>
                </entry>
            </entries>
            <selection-cardinality>1</selection-cardinality>
        </value-attributes>
        <description>
            Field policy. IGNORED: skip the check; NOT_NULL: non-null check; NOT_EMPTY: non-empty check
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.global-config.db-config.column-underline</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.global-config.db-config.logic-delete-value</name>
        <value>1</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.global-config.db-config.logic-not-delete-value</name>
        <value>0</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.global-config.db-config.banner</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>mybatis-plus.configuration.map-underscore-to-camel-case</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.configuration.cache-enabled</name>
        <value>false</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.configuration.call-setters-on-nulls</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>mybatis-plus.configuration.jdbc-type-for-null</name>
        <value>null</value>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.exec.threads</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.exec.task.num</name>
        <value>20</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.heartbeat.interval</name>
        <value>10</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.task.commit.retryTimes</name>
        <value>5</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.task.commit.interval</name>
        <value>1000</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.max.cpuload.avg</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.reserved.memory</name>
        <value>0.1</value>
        <value-attributes>
            <type>float</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.exec.threads</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.heartbeat.interval</name>
        <value>10</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.fetch.task.num</name>
        <value>3</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.max.cpuload.avg</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.reserved.memory</name>
        <value>0.1</value>
        <value-attributes>
            <type>float</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>

</configuration>
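The validation-related knobs above work together: testOnBorrow/testOnReturn/testWhileIdle decide when to validate, and validationQuery decides how. A rough Python restatement of the check Druid performs (Druid is Java; this sketch only makes the relationship concrete, and the timeout is enforced by Druid itself via validationQueryTimeout):

def is_connection_valid(conn, validation_query="SELECT 1"):
    # validationQueryTimeout above bounds this check to 3 seconds in Druid
    try:
        cur = conn.cursor()
        cur.execute(validation_query)
        cur.fetchone()
        return True
    except Exception:
        return False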
@@ -0,0 +1,123 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>dolphin.database.type</name>
        <value>mysql</value>
        <description>The database type selected for Dolphin Scheduler</description>
        <display-name>Dolphin Database Type</display-name>
        <value-attributes>
            <type>value-list</type>
            <entries>
                <entry>
                    <value>mysql</value>
                    <label>Mysql</label>
                </entry>
                <entry>
                    <value>postgresql</value>
                    <label>Postgresql</label>
                </entry>
            </entries>
            <selection-cardinality>1</selection-cardinality>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>dolphin.database.host</name>
        <value></value>
        <display-name>Dolphin Database Host</display-name>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>dolphin.database.port</name>
        <value></value>
        <display-name>Dolphin Database Port</display-name>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>dolphin.database.username</name>
        <value></value>
        <display-name>Dolphin Database Username</display-name>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>dolphin.database.password</name>
        <value></value>
        <display-name>Dolphin Database Password</display-name>
        <property-type>PASSWORD</property-type>
        <value-attributes>
            <type>password</type>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>dolphin.user</name>
        <value></value>
        <description>Which user to install and admin dolphin scheduler</description>
        <display-name>Deploy User</display-name>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>dolphin.group</name>
        <value></value>
        <description>Which group to install and admin dolphin scheduler</description>
        <display-name>Deploy Group</display-name>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>dolphinscheduler-env-content</name>
        <display-name>Dolphinscheduler Env template</display-name>
        <description>This is the jinja template for the dolphinscheduler.env.sh file</description>
        <value>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/opt/soft/python
export JAVA_HOME=/opt/soft/java
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink</value>
        <value-attributes>
            <type>content</type>
            <empty-value-valid>false</empty-value-valid>
            <show-property-name>false</show-property-name>
        </value-attributes>
        <on-ambari-upgrade add="true"/>
    </property>
</configuration>
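The dolphinscheduler-env-content value is a jinja template that the service scripts render into the env file on each host. A sketch with plain jinja2 standing in for Ambari's InlineTemplate wrapper (the output path and inlined template value are assumptions for illustration):

from jinja2 import Template

# in a real service script this would come from
# config['configurations']['dolphin-env']['dolphinscheduler-env-content']
env_content = "export HADOOP_HOME=/opt/soft/hadoop\nexport JAVA_HOME=/opt/soft/java\n"
with open("dolphinscheduler_env.sh", "w") as f:
    f.write(Template(env_content).render())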
@@ -0,0 +1,88 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>master.exec.threads</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>master execute thread num</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.exec.task.num</name>
        <value>20</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>number of tasks the master executes in parallel</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.heartbeat.interval</name>
        <value>10</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>master heartbeat interval</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.task.commit.retryTimes</name>
        <value>5</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>master commit task retry times</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.task.commit.interval</name>
        <value>1000</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>master commit task interval</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.max.cpuload.avg</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>The master server takes on work only while the CPU load average is below this value. Default: the number of CPU cores * 2</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>master.reserved.memory</name>
        <value>0.3</value>
        <description>The master server takes on work only while available memory is larger than this reserve. Default: physical memory * 1/10, in GB</description>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>master.listen.port</name>
        <value>5678</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>master listen port</description>
        <on-ambari-upgrade add="true"/>
    </property>
</configuration>
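master.max.cpuload.avg and master.reserved.memory gate whether the master takes on new work. The real check lives in DolphinScheduler's Java code; a Linux-only Python sketch of the same rule, using the defaults above:

import os

def master_can_work(max_cpuload_avg=100, reserved_memory_gb=0.3):
    load1, _, _ = os.getloadavg()
    # available memory in GB; sysconf-based, Linux-only assumption
    avail_gb = os.sysconf('SC_AVPHYS_PAGES') * os.sysconf('SC_PAGE_SIZE') / float(2 ** 30)
    # work only while load stays below the cap and free memory above the reserve
    return load1 < max_cpuload_avg and avail_gb > reserved_memory_gb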
@@ -0,0 +1,126 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>org.quartz.scheduler.instanceName</name>
        <value>DolphinScheduler</value>
        <description></description>
    </property>
    <property>
        <!-- enumerate the allowed values -->
        <name>org.quartz.scheduler.instanceId</name>
        <value>AUTO</value>
        <description></description>
    </property>
    <property>
        <name>org.quartz.scheduler.makeSchedulerThreadDaemon</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.useProperties</name>
        <value>false</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.threadPool.class</name>
        <value>org.quartz.simpl.SimpleThreadPool</value>
        <description></description>
    </property>
    <property>
        <name>org.quartz.threadPool.makeThreadsDaemons</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.threadPool.threadCount</name>
        <value>25</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.threadPool.threadPriority</name>
        <value>5</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.class</name>
        <value>org.quartz.impl.jdbcjobstore.JobStoreTX</value>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.tablePrefix</name>
        <value>QRTZ_</value>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.isClustered</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.misfireThreshold</name>
        <value>60000</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.clusterCheckinInterval</name>
        <value>5000</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.acquireTriggersWithinLock</name>
        <value>true</value>
        <value-attributes>
            <type>boolean</type>
        </value-attributes>
        <description></description>
    </property>
    <property>
        <name>org.quartz.jobStore.dataSource</name>
        <value>myDs</value>
        <description></description>
    </property>
    <property>
        <name>org.quartz.dataSource.myDs.connectionProvider.class</name>
        <value>org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider</value>
        <description></description>
    </property>
</configuration>
@@ -0,0 +1,76 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>worker.exec.threads</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>worker execute thread num</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.heartbeat.interval</name>
        <value>10</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>worker heartbeat interval</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.fetch.task.num</name>
        <value>3</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>the number of tasks to fetch and submit at a time</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.max.cpuload.avg</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>The worker server takes on work only while the CPU load average is below this value. Default: the number of CPU cores * 2</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.reserved.memory</name>
        <value>0.3</value>
        <description>The worker server takes on work only while available memory is larger than this reserve. Default: physical memory * 1/10, in GB</description>
        <on-ambari-upgrade add="true"/>
    </property>

    <property>
        <name>worker.listen.port</name>
        <value>1234</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description>worker listen port</description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>worker.group</name>
        <value>default</value>
        <description>default worker group</description>
        <on-ambari-upgrade add="true"/>
    </property>
</configuration>
@@ -0,0 +1,84 @@
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<configuration>
    <property>
        <name>dolphinscheduler.queue.impl</name>
        <value>zookeeper</value>
        <description>
            Task queue implementation, default "zookeeper"
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>zookeeper.dolphinscheduler.root</name>
        <value>/dolphinscheduler</value>
        <description>
            dolphinscheduler root directory
        </description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>zookeeper.session.timeout</name>
        <value>300</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>zookeeper.connection.timeout</name>
        <value>300</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>zookeeper.retry.base.sleep</name>
        <value>100</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>zookeeper.retry.max.sleep</name>
        <value>30000</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
    <property>
        <name>zookeeper.retry.maxtime</name>
        <value>5</value>
        <value-attributes>
            <type>int</type>
        </value-attributes>
        <description></description>
        <on-ambari-upgrade add="true"/>
    </property>
</configuration>
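The three zookeeper.retry.* values parameterize an exponential backoff; DolphinScheduler uses Apache Curator for this, and the formula below only approximates Curator's ExponentialBackoffRetry, so treat it as a sketch:

import random

def retry_sleep_ms(retry_count, base_sleep=100, max_sleep=30000):
    # random multiple of the base sleep whose upper bound doubles per retry
    delay = base_sleep * max(1, random.randrange(1 << (retry_count + 1)))
    return min(delay, max_sleep)

# zookeeper.retry.maxtime (5) bounds how many retries consult this at all.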
137 lines  ambari_plugin/common-services/DOLPHIN/1.3.0/metainfo.xml  (new file)
@@ -0,0 +1,137 @@
<?xml version="1.0"?>
<!--
   Licensed to the Apache Software Foundation (ASF) under one or more
   contributor license agreements. See the NOTICE file distributed with
   this work for additional information regarding copyright ownership.
   The ASF licenses this file to You under the Apache License, Version 2.0
   (the "License"); you may not use this file except in compliance with
   the License. You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
-->
<metainfo>
    <schemaVersion>2.0</schemaVersion>
    <services>
        <service>
            <name>DOLPHIN</name>
            <displayName>Dolphin Scheduler</displayName>
            <comment>A distributed, easily extensible visual DAG workflow task scheduling system</comment>
            <version>1.3.0</version>
            <components>
                <component>
                    <name>DOLPHIN_MASTER</name>
                    <displayName>DS Master</displayName>
                    <category>MASTER</category>
                    <cardinality>1+</cardinality>
                    <commandScript>
                        <script>scripts/dolphin_master_service.py</script>
                        <scriptType>PYTHON</scriptType>
                        <timeout>600</timeout>
                    </commandScript>
                </component>

                <component>
                    <name>DOLPHIN_LOGGER</name>
                    <displayName>DS Logger</displayName>
                    <category>SLAVE</category>
                    <cardinality>1+</cardinality>
                    <commandScript>
                        <script>scripts/dolphin_logger_service.py</script>
                        <scriptType>PYTHON</scriptType>
                        <timeout>600</timeout>
                    </commandScript>
                </component>

                <component>
                    <name>DOLPHIN_WORKER</name>
                    <displayName>DS Worker</displayName>
                    <category>SLAVE</category>
                    <cardinality>1+</cardinality>
                    <dependencies>
                        <dependency>
                            <name>DOLPHIN/DOLPHIN_LOGGER</name>
                            <scope>host</scope>
                            <auto-deploy>
                                <enabled>true</enabled>
                            </auto-deploy>
                        </dependency>
                    </dependencies>
                    <commandScript>
                        <script>scripts/dolphin_worker_service.py</script>
                        <scriptType>PYTHON</scriptType>
                        <timeout>600</timeout>
                    </commandScript>
                </component>

                <component>
                    <name>DOLPHIN_ALERT</name>
                    <displayName>DS Alert</displayName>
                    <category>SLAVE</category>
                    <cardinality>1</cardinality>
                    <commandScript>
                        <script>scripts/dolphin_alert_service.py</script>
                        <scriptType>PYTHON</scriptType>
                        <timeout>600</timeout>
                    </commandScript>
                </component>

                <component>
                    <name>DOLPHIN_API</name>
                    <displayName>DS_Api</displayName>
                    <category>SLAVE</category>
                    <cardinality>1</cardinality>
                    <commandScript>
                        <script>scripts/dolphin_api_service.py</script>
                        <scriptType>PYTHON</scriptType>
                        <timeout>600</timeout>
                    </commandScript>
                </component>
            </components>

            <requiredServices>
                <service>ZOOKEEPER</service>
            </requiredServices>

            <osSpecifics>
                <osSpecific>
                    <osFamily>any</osFamily>
                    <packages>
                        <package>
                            <name>apache-dolphinscheduler-incubating-1.3.0*</name>
                        </package>
                    </packages>
                </osSpecific>
            </osSpecifics>

            <configuration-dependencies>
                <config-type>dolphin-alert</config-type>
                <config-type>dolphin-app-api</config-type>
                <config-type>dolphin-app-dao</config-type>
                <config-type>dolphin-common</config-type>
                <config-type>dolphin-env</config-type>
                <config-type>dolphin-quartz</config-type>
            </configuration-dependencies>

            <themes>
                <theme>
                    <fileName>theme.json</fileName>
                    <default>true</default>
                </theme>
            </themes>

            <quickLinksConfigurations-dir>quicklinks</quickLinksConfigurations-dir>
            <quickLinksConfigurations>
                <quickLinksConfiguration>
                    <fileName>quicklinks.json</fileName>
                    <default>true</default>
                </quickLinksConfiguration>
            </quickLinksConfigurations>
        </service>
    </services>
</metainfo>
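Pulling together the paths referenced in metainfo.xml and alerts.json, the service folder is expected to look roughly like this (inferred from the references above plus Ambari's usual configuration/ convention; the layout is not spelled out in the commit):

DOLPHIN/1.3.0/
    alerts.json
    metainfo.xml
    configuration/          # the dolphin-* config types above
    package/
        alerts/alert_dolphin_scheduler_status.py
        scripts/dolphin_master_service.py, dolphin_logger_service.py,
                dolphin_worker_service.py, dolphin_alert_service.py,
                dolphin_api_service.py
    themes/theme.json
    quicklinks/quicklinks.json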
@ -0,0 +1,124 @@
|
||||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
import socket
|
||||
import urllib2
|
||||
import os
|
||||
import logging
|
||||
import ambari_simplejson as json
|
||||
from resource_management.libraries.script.script import Script
|
||||
import sys
|
||||
reload(sys)
|
||||
sys.setdefaultencoding('utf-8')
|
||||
|
||||
logger = logging.getLogger('ambari_alerts')
|
||||
|
||||
config = Script.get_config()
|
||||
|
||||
|
||||
def get_tokens():
    """
    Returns a tuple of tokens in the format {{site/property}} that will be used
    to build the dictionary passed into execute

    :rtype tuple
    """

def get_info(url, connection_timeout):
    response = None

    try:
        response = urllib2.urlopen(url, timeout=connection_timeout)
        json_data = response.read()
        return json_data
    finally:
        if response is not None:
            try:
                response.close()
            except:
                pass


def execute(configurations={}, parameters={}, host_name=None):
    """
    Returns a tuple containing the result code and a pre-formatted result label

    Keyword arguments:
    configurations : a mapping of configuration key to value
    parameters : a mapping of script parameter key to value
    host_name : the name of this host where the alert is running

    :type configurations dict
    :type parameters dict
    :type host_name str
    """

    alert_name = parameters['alertName']

    dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"

    pid = "0"

    from resource_management.core import sudo

    is_running = True
    pid_file_path = ""
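    # Map the Ambari alert name to the pid file written by
    # dolphinscheduler-daemon.sh; liveness is verified below with signal 0.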
    if alert_name == 'DOLPHIN_MASTER':
        pid_file_path = dolphin_pidfile_dir + "/master-server.pid"
    elif alert_name == 'DOLPHIN_WORKER':
        pid_file_path = dolphin_pidfile_dir + "/worker-server.pid"
    elif alert_name == 'DOLPHIN_ALERT':
        pid_file_path = dolphin_pidfile_dir + "/alert-server.pid"
    elif alert_name == 'DOLPHIN_LOGGER':
        pid_file_path = dolphin_pidfile_dir + "/logger-server.pid"
    elif alert_name == 'DOLPHIN_API':
        pid_file_path = dolphin_pidfile_dir + "/api-server.pid"

    if not pid_file_path or not os.path.isfile(pid_file_path):
        is_running = False

    try:
        pid = int(sudo.read_file(pid_file_path))
    except:
        is_running = False

    try:
        # Kill will not actually kill the process
        # From the doc:
        # If sig is 0, then no signal is sent, but error checking is still
        # performed; this can be used to check for the existence of a
        # process ID or process group ID.
        sudo.kill(pid, 0)
    except OSError:
        is_running = False

    if host_name is None:
        host_name = socket.getfqdn()

    if not is_running:
        result_code = "CRITICAL"
    else:
        result_code = "OK"

    label = "The component {0} of DOLPHIN_SCHEDULER on {1} is {2}".format(alert_name, host_name, result_code)

    return (result_code, [label])


if __name__ == "__main__":
    pass
@ -0,0 +1,61 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *

from dolphin_env import dolphin_env


class DolphinAlertService(Script):
    def install(self, env):
        import params
        env.set_params(params)
        self.install_packages(env)
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        import params
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        import params
        env.set_params(params)
        self.configure(env)
        no_op_test = format("ls {dolphin_pidfile_dir}/alert-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/alert-server.pid` | grep `cat {dolphin_pidfile_dir}/alert-server.pid` >/dev/null 2>&1")

        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start alert-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop alert-server")
        Execute(stop_cmd, user=params.dolphin_user)
        time.sleep(5)

    def status(self, env):
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "alert-server.pid")


if __name__ == "__main__":
    DolphinAlertService().execute()
@ -0,0 +1,70 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *

from dolphin_env import dolphin_env


class DolphinApiService(Script):
    def install(self, env):
        import params
        env.set_params(params)
        self.install_packages(env)
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        import params
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        import params
        env.set_params(params)
        self.configure(env)

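        # Create and upgrade the DolphinScheduler metadata schema before the
        # api-server starts; both helper scripts ship with the distribution.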
        # init
        init_cmd = format("sh " + params.dolphin_home + "/script/create-dolphinscheduler.sh")
        Execute(init_cmd, user=params.dolphin_user)

        # upgrade
        upgrade_cmd = format("sh " + params.dolphin_home + "/script/upgrade-dolphinscheduler.sh")
        Execute(upgrade_cmd, user=params.dolphin_user)

        no_op_test = format("ls {dolphin_pidfile_dir}/api-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/api-server.pid` | grep `cat {dolphin_pidfile_dir}/api-server.pid` >/dev/null 2>&1")

        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start api-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop api-server")
        Execute(stop_cmd, user=params.dolphin_user)
        time.sleep(5)

    def status(self, env):
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "api-server.pid")


if __name__ == "__main__":
    DolphinApiService().execute()
@ -0,0 +1,123 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from resource_management import *


def dolphin_env():
    import params

    Directory(params.dolphin_pidfile_dir,
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )
    Directory(params.dolphin_log_dir,
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )
    Directory(params.dolphin_conf_dir,
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )

    Directory(params.dolphin_common_map['data.basedir.path'],
              mode=0777,
              owner=params.dolphin_user,
              group=params.dolphin_group,
              create_parents=True
              )

    File(format(params.dolphin_env_path),
         mode=0777,
         content=InlineTemplate(params.dolphin_env_content),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh"),
         mode=0755,
         content=Template("dolphin-daemon.sh.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

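    # Render each component's .properties file from its Jinja2 template; the
    # key/value pairs come from the corresponding *_map built in params.py.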
    File(format(params.dolphin_conf_dir + "/master.properties"),
         mode=0755,
         content=Template("master.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/worker.properties"),
         mode=0755,
         content=Template("worker.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/alert.properties"),
         mode=0755,
         content=Template("alert.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/datasource.properties"),
         mode=0755,
         content=Template("datasource.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/application-api.properties"),
         mode=0755,
         content=Template("application-api.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/common.properties"),
         mode=0755,
         content=Template("common.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/quartz.properties"),
         mode=0755,
         content=Template("quartz.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )

    File(format(params.dolphin_conf_dir + "/zookeeper.properties"),
         mode=0755,
         content=Template("zookeeper.properties.j2"),
         owner=params.dolphin_user,
         group=params.dolphin_group
         )
@ -0,0 +1,61 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *

from dolphin_env import dolphin_env


class DolphinLoggerService(Script):
    def install(self, env):
        import params
        env.set_params(params)
        self.install_packages(env)
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        import params
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        import params
        env.set_params(params)
        self.configure(env)
        no_op_test = format("ls {dolphin_pidfile_dir}/logger-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/logger-server.pid` | grep `cat {dolphin_pidfile_dir}/logger-server.pid` >/dev/null 2>&1")

        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start logger-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop logger-server")
        Execute(stop_cmd, user=params.dolphin_user)
        time.sleep(5)

    def status(self, env):
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "logger-server.pid")


if __name__ == "__main__":
    DolphinLoggerService().execute()
@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *

from dolphin_env import dolphin_env


class DolphinMasterService(Script):
    def install(self, env):
        import params
        env.set_params(params)
        self.install_packages(env)
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        import params
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        import params
        env.set_params(params)
        self.configure(env)
        no_op_test = format("ls {dolphin_pidfile_dir}/master-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/master-server.pid` | grep `cat {dolphin_pidfile_dir}/master-server.pid` >/dev/null 2>&1")
        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start master-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop master-server")
        Execute(stop_cmd, user=params.dolphin_user)
        time.sleep(5)

    def status(self, env):
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "master-server.pid")


if __name__ == "__main__":
    DolphinMasterService().execute()
@ -0,0 +1,60 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from resource_management import *

from dolphin_env import dolphin_env


class DolphinWorkerService(Script):
    def install(self, env):
        import params
        env.set_params(params)
        self.install_packages(env)
        Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

    def configure(self, env):
        import params
        params.pika_slave = True
        env.set_params(params)

        dolphin_env()

    def start(self, env):
        import params
        env.set_params(params)
        self.configure(env)
        no_op_test = format("ls {dolphin_pidfile_dir}/worker-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/worker-server.pid` | grep `cat {dolphin_pidfile_dir}/worker-server.pid` >/dev/null 2>&1")
        start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start worker-server")
        Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

    def stop(self, env):
        import params
        env.set_params(params)
        stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop worker-server")
        Execute(stop_cmd, user=params.dolphin_user)
        time.sleep(5)

    def status(self, env):
        import status_params
        env.set_params(status_params)
        check_process_status(status_params.dolphin_run_dir + "worker-server.pid")


if __name__ == "__main__":
    DolphinWorkerService().execute()
@ -0,0 +1,154 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""


import sys
from resource_management import *
from resource_management.core.logger import Logger
from resource_management.libraries.functions import default

Logger.initialize_logger()
reload(sys)
sys.setdefaultencoding('utf-8')

# server configurations
config = Script.get_config()

# conf_dir = "/etc/"
dolphin_home = "/opt/soft/dolphinscheduler"
dolphin_conf_dir = dolphin_home + "/conf"
dolphin_log_dir = dolphin_home + "/logs"
dolphin_bin_dir = dolphin_home + "/bin"
dolphin_lib_jars = dolphin_home + "/lib/*"
dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"

rmHosts = default("/clusterHostInfo/rm_host", [])

# dolphin-env
dolphin_env_map = {}
dolphin_env_map.update(config['configurations']['dolphin-env'])

# which user to install and admin dolphin scheduler
dolphin_user = dolphin_env_map['dolphin.user']
dolphin_group = dolphin_env_map['dolphin.group']

# .dolphinscheduler_env.sh
dolphin_env_path = dolphin_conf_dir + '/env/dolphinscheduler_env.sh'
dolphin_env_content = dolphin_env_map['dolphinscheduler-env-content']

# database config
dolphin_database_config = {}
dolphin_database_config['dolphin_database_type'] = dolphin_env_map['dolphin.database.type']
dolphin_database_config['dolphin_database_username'] = dolphin_env_map['dolphin.database.username']
dolphin_database_config['dolphin_database_password'] = dolphin_env_map['dolphin.database.password']
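# Derive the JDBC driver, Quartz delegate class and connection URL from the
# configured database type; any value other than 'mysql' falls back to PostgreSQL.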
if 'mysql' == dolphin_database_config['dolphin_database_type']:
    dolphin_database_config['dolphin_database_driver'] = 'com.mysql.jdbc.Driver'
    dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.StdJDBCDelegate'
    dolphin_database_config['dolphin_database_url'] = 'jdbc:mysql://' + dolphin_env_map['dolphin.database.host'] \
                                                      + ':' + dolphin_env_map['dolphin.database.port'] \
                                                      + '/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8'
else:
    dolphin_database_config['dolphin_database_driver'] = 'org.postgresql.Driver'
    dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.PostgreSQLDelegate'
    dolphin_database_config['dolphin_database_url'] = 'jdbc:postgresql://' + dolphin_env_map['dolphin.database.host'] \
                                                      + ':' + dolphin_env_map['dolphin.database.port'] \
                                                      + '/dolphinscheduler'


# application-alert.properties
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'

dolphin_alert_config_map = config['configurations']['dolphin-alert']

if dolphin_alert_config_map['enterprise.wechat.enable']:
    dolphin_alert_map['enterprise.wechat.push.ur'] = wechat_push_url
    dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url
    dolphin_alert_map['enterprise.wechat.team.send.msg'] = wechat_team_send_msg
    dolphin_alert_map['enterprise.wechat.user.send.msg'] = wechat_user_send_msg

dolphin_alert_map.update(dolphin_alert_config_map)


# application-api.properties
dolphin_app_api_map = {}
dolphin_app_api_map.update(config['configurations']['dolphin-application-api'])


# common.properties
dolphin_common_map = {}

if 'yarn-site' in config['configurations'] and \
        'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
    yarn_resourcemanager_webapp_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
    yarn_application_status_address = 'http://' + yarn_resourcemanager_webapp_address + '/ws/v1/cluster/apps/%s'
    dolphin_common_map['yarn.application.status.address'] = yarn_application_status_address

rmHosts = default("/clusterHostInfo/rm_host", [])
if len(rmHosts) > 1:
    dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ','.join(rmHosts)
else:
    dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ''

dolphin_common_map_tmp = config['configurations']['dolphin-common']
data_basedir_path = dolphin_common_map_tmp['data.basedir.path']
process_exec_basepath = data_basedir_path + '/exec'
data_download_basedir_path = data_basedir_path + '/download'
dolphin_common_map['process.exec.basepath'] = process_exec_basepath
dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path
dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path
dolphin_common_map.update(config['configurations']['dolphin-common'])

# datasource.properties
dolphin_datasource_map = {}
dolphin_datasource_map['spring.datasource.type'] = 'com.alibaba.druid.pool.DruidDataSource'
dolphin_datasource_map['spring.datasource.driver-class-name'] = dolphin_database_config['dolphin_database_driver']
dolphin_datasource_map['spring.datasource.url'] = dolphin_database_config['dolphin_database_url']
dolphin_datasource_map['spring.datasource.username'] = dolphin_database_config['dolphin_database_username']
dolphin_datasource_map['spring.datasource.password'] = dolphin_database_config['dolphin_database_password']
dolphin_datasource_map.update(config['configurations']['dolphin-datasource'])

# master.properties
dolphin_master_map = config['configurations']['dolphin-master']

# quartz.properties
dolphin_quartz_map = {}
dolphin_quartz_map['org.quartz.jobStore.driverDelegateClass'] = dolphin_database_config['driverDelegateClass']
dolphin_quartz_map.update(config['configurations']['dolphin-quartz'])

# worker.properties
dolphin_worker_map = config['configurations']['dolphin-worker']

# zookeeper.properties
dolphin_zookeeper_map = {}
zookeeperHosts = default("/clusterHostInfo/zookeeper_hosts", [])
if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg']:
    clientPort = config['configurations']['zoo.cfg']['clientPort']
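    # Build "host1:port,host2:port,...": join the hosts with ":port," and
    # append ":port" to the final host.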
    zookeeperPort = ":" + clientPort + ","
    dolphin_zookeeper_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort
dolphin_zookeeper_map.update(config['configurations']['dolphin-zookeeper'])
@ -0,0 +1,31 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from resource_management import *
from resource_management.libraries.functions import get_unique_id_and_date


class ServiceCheck(Script):
    def service_check(self, env):
        import params
        # env.set_params(params)

        # Execute(format("which pika_server"))


if __name__ == "__main__":
    ServiceCheck().execute()
@ -0,0 +1,23 @@
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from resource_management import *

config = Script.get_config()

dolphin_run_dir = "/opt/soft/run/dolphinscheduler/"
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

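# Every key/value pair in dolphin_alert_map (assembled in params.py) is
# rendered below as key=value.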
{% for key, value in dolphin_alert_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

{% for key, value in dolphin_app_api_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

{% for key, value in dolphin_common_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

{% for key, value in dolphin_datasource_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,116 @@
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

usage="Usage: dolphinscheduler-daemon.sh (start|stop) <command> "

# if no args specified, show usage
if [ $# -le 1 ]; then
  echo $usage
  exit 1
fi

startStop=$1
shift
command=$1
shift

echo "Begin $startStop $command......"

BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
DOLPHINSCHEDULER_HOME=$BIN_DIR/..

export HOSTNAME=`hostname`

DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}}

DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
STOP_TIMEOUT=5

log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out
pid={{dolphin_pidfile_dir}}/$command.pid

cd $DOLPHINSCHEDULER_HOME

if [ "$command" = "api-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-api.xml -Dspring.profiles.active=api"
  CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer
elif [ "$command" = "master-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-master.xml -Ddruid.mysql.usePingMethod=false"
  CLASS=org.apache.dolphinscheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-worker.xml -Ddruid.mysql.usePingMethod=false"
  CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then
  LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/logback-alert.xml"
  CLASS=org.apache.dolphinscheduler.alert.AlertServer
elif [ "$command" = "logger-server" ]; then
  CLASS=org.apache.dolphinscheduler.server.log.LoggerServer
else
  echo "Error: No command named \`$command' was found."
  exit 1
fi

case $startStop in
  (start)

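    # kill -0 only probes the pid: refuse to start when the daemon is already running.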
    if [ -f $pid ]; then
      if kill -0 `cat $pid` > /dev/null 2>&1; then
        echo $command running as process `cat $pid`. Stop it first.
        exit 1
      fi
    fi

    echo starting $command, logging to $log

    exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath {{dolphin_conf_dir}}:{{dolphin_lib_jars}} $CLASS"

    echo "nohup java $exec_command > $log 2>&1 < /dev/null &"
    nohup java $exec_command > $log 2>&1 < /dev/null &
    echo $! > $pid
    ;;

  (stop)

    if [ -f $pid ]; then
      TARGET_PID=`cat $pid`
      if kill -0 $TARGET_PID > /dev/null 2>&1; then
        echo stopping $command
        kill $TARGET_PID
        sleep $STOP_TIMEOUT
        if kill -0 $TARGET_PID > /dev/null 2>&1; then
          echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9"
          kill -9 $TARGET_PID
        fi
      else
        echo no $command to stop
      fi
      rm -f $pid
    else
      echo no $command to stop
    fi
    ;;

  (*)
    echo $usage
    exit 1
    ;;

esac

echo "End $startStop $command."
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

{% for key, value in dolphin_master_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

{% for key, value in dolphin_quartz_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

{% for key, value in dolphin_worker_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
@ -0,0 +1,20 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

{% for key, value in dolphin_zookeeper_map.iteritems() -%}
{{key}}={{value}}
{% endfor %}
26
ambari_plugin/common-services/DOLPHIN/1.3.0/quicklinks/quicklinks.json
Executable file
@ -0,0 +1,26 @@
{
  "name": "default",
  "description": "default quick links configuration",
  "configuration": {
    "protocol": {
      "type": "http"
    },

    "links": [
      {
        "name": "dolphin-application-ui",
        "label": "DolphinApplication UI",
        "requires_user_name": "false",
        "component_name": "DOLPHIN_API",
        "url": "%@://%@:%@/dolphinscheduler/ui/view/login/index.html",
        "port": {
          "http_property": "server.port",
          "http_default_port": "12345",
          "regex": "^(\\d+)$",
          "site": "dolphin-application-api"
        }
      }
    ]
  }
}
661
ambari_plugin/common-services/DOLPHIN/1.3.0/themes/theme.json
Normal file
@ -0,0 +1,661 @@
{
  "name": "default",
  "description": "Default theme for Dolphin Scheduler service",
  "configuration": {
    "layouts": [
      {
        "name": "default",
        "tabs": [
          {
            "name": "settings",
            "display-name": "Settings",
            "layout": {
              "tab-rows": "3",
              "tab-columns": "3",
              "sections": [
                {
                  "name": "dolphin-env-config",
                  "display-name": "Dolphin Env Config",
                  "row-index": "0",
                  "column-index": "0",
                  "row-span": "1",
                  "column-span": "2",
                  "section-rows": "1",
                  "section-columns": "2",
                  "subsections": [
                    { "name": "env-row1-col1", "display-name": "Deploy User Info", "row-index": "0", "column-index": "0", "row-span": "1", "column-span": "1" },
                    { "name": "env-row1-col2", "display-name": "System Env Optimization", "row-index": "0", "column-index": "1", "row-span": "1", "column-span": "1" }
                  ]
                },
                {
                  "name": "dolphin-database-config",
                  "display-name": "Database Config",
                  "row-index": "1",
                  "column-index": "0",
                  "row-span": "1",
                  "column-span": "2",
                  "section-rows": "1",
                  "section-columns": "3",
                  "subsections": [
                    { "name": "database-row1-col1", "row-index": "0", "column-index": "0", "row-span": "1", "column-span": "1" },
                    { "name": "database-row1-col2", "row-index": "0", "column-index": "1", "row-span": "1", "column-span": "1" },
                    { "name": "database-row1-col3", "row-index": "0", "column-index": "2", "row-span": "1", "column-span": "1" }
                  ]
                },
                {
                  "name": "dynamic-config",
                  "row-index": "2",
                  "column-index": "0",
                  "row-span": "1",
                  "column-span": "2",
                  "section-rows": "1",
                  "section-columns": "3",
                  "subsections": [
                    { "name": "dynamic-row1-col1", "display-name": "Resource FS Config", "row-index": "0", "column-index": "0", "row-span": "1", "column-span": "1" },
                    { "name": "dynamic-row1-col2", "display-name": "Kerberos Info", "row-index": "0", "column-index": "1", "row-span": "1", "column-span": "1" },
                    { "name": "dynamic-row1-col3", "display-name": "Wechat Info", "row-index": "0", "column-index": "1", "row-span": "1", "column-span": "1" }
                  ]
                }
              ]
            }
          }
        ]
      }
    ],
    "placement": {
      "configuration-layout": "default",
      "configs": [
        { "config": "dolphin-env/dolphin.database.type", "subsection-name": "database-row1-col1" },
        { "config": "dolphin-env/dolphin.database.host", "subsection-name": "database-row1-col2" },
        { "config": "dolphin-env/dolphin.database.port", "subsection-name": "database-row1-col2" },
        { "config": "dolphin-env/dolphin.database.username", "subsection-name": "database-row1-col3" },
        { "config": "dolphin-env/dolphin.database.password", "subsection-name": "database-row1-col3" },
        { "config": "dolphin-env/dolphin.user", "subsection-name": "env-row1-col1" },
        { "config": "dolphin-env/dolphin.group", "subsection-name": "env-row1-col1" },
        { "config": "dolphin-env/dolphinscheduler-env-content", "subsection-name": "env-row1-col2" },
        { "config": "dolphin-common/resource.storage.type", "subsection-name": "dynamic-row1-col1" },
        {
          "config": "dolphin-common/resource.upload.path",
          "subsection-name": "dynamic-row1-col1",
          "depends-on": [
            {
              "configs": ["dolphin-common/resource.storage.type"],
              "if": "${dolphin-common/resource.storage.type} === HDFS || ${dolphin-common/resource.storage.type} === S3",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/hdfs.root.user",
          "subsection-name": "dynamic-row1-col1",
          "depends-on": [
            {
              "configs": ["dolphin-common/resource.storage.type"],
              "if": "${dolphin-common/resource.storage.type} === HDFS",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/data.store2hdfs.basepath",
          "subsection-name": "dynamic-row1-col1",
          "depends-on": [
            {
              "configs": ["dolphin-common/resource.storage.type"],
              "if": "${dolphin-common/resource.storage.type} === HDFS",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/fs.defaultFS",
          "subsection-name": "dynamic-row1-col1",
          "depends-on": [
            {
              "configs": ["dolphin-common/resource.storage.type"],
              "if": "${dolphin-common/resource.storage.type} === HDFS",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/fs.s3a.endpoint",
          "subsection-name": "dynamic-row1-col1",
          "depends-on": [
            {
              "configs": ["dolphin-common/resource.storage.type"],
              "if": "${dolphin-common/resource.storage.type} === S3",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/fs.s3a.access.key",
          "subsection-name": "dynamic-row1-col1",
          "depends-on": [
            {
              "configs": ["dolphin-common/resource.storage.type"],
              "if": "${dolphin-common/resource.storage.type} === S3",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/fs.s3a.secret.key",
          "subsection-name": "dynamic-row1-col1",
          "depends-on": [
            {
              "configs": ["dolphin-common/resource.storage.type"],
              "if": "${dolphin-common/resource.storage.type} === S3",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        { "config": "dolphin-common/hadoop.security.authentication.startup.state", "subsection-name": "dynamic-row1-col2" },
        {
          "config": "dolphin-common/java.security.krb5.conf.path",
          "subsection-name": "dynamic-row1-col2",
          "depends-on": [
            {
              "configs": ["dolphin-common/hadoop.security.authentication.startup.state"],
              "if": "${dolphin-common/hadoop.security.authentication.startup.state}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/login.user.keytab.username",
          "subsection-name": "dynamic-row1-col2",
          "depends-on": [
            {
              "configs": ["dolphin-common/hadoop.security.authentication.startup.state"],
              "if": "${dolphin-common/hadoop.security.authentication.startup.state}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/login.user.keytab.path",
          "subsection-name": "dynamic-row1-col2",
          "depends-on": [
            {
              "configs": ["dolphin-common/hadoop.security.authentication.startup.state"],
              "if": "${dolphin-common/hadoop.security.authentication.startup.state}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-common/kerberos.expire.time",
          "subsection-name": "dynamic-row1-col2",
          "depends-on": [
            {
              "configs": ["dolphin-common/hadoop.security.authentication.startup.state"],
              "if": "${dolphin-common/hadoop.security.authentication.startup.state}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        { "config": "dolphin-alert/enterprise.wechat.enable", "subsection-name": "dynamic-row1-col3" },
        {
          "config": "dolphin-alert/enterprise.wechat.corp.id",
          "subsection-name": "dynamic-row1-col3",
          "depends-on": [
            {
              "configs": ["dolphin-alert/enterprise.wechat.enable"],
              "if": "${dolphin-alert/enterprise.wechat.enable}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-alert/enterprise.wechat.secret",
          "subsection-name": "dynamic-row1-col3",
          "depends-on": [
            {
              "configs": ["dolphin-alert/enterprise.wechat.enable"],
              "if": "${dolphin-alert/enterprise.wechat.enable}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-alert/enterprise.wechat.agent.id",
          "subsection-name": "dynamic-row1-col3",
          "depends-on": [
            {
              "configs": ["dolphin-alert/enterprise.wechat.enable"],
              "if": "${dolphin-alert/enterprise.wechat.enable}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        },
        {
          "config": "dolphin-alert/enterprise.wechat.users",
          "subsection-name": "dynamic-row1-col3",
          "depends-on": [
            {
              "configs": ["dolphin-alert/enterprise.wechat.enable"],
              "if": "${dolphin-alert/enterprise.wechat.enable}",
              "then": { "property_value_attributes": { "visible": true } },
              "else": { "property_value_attributes": { "visible": false } }
            }
          ]
        }
      ]
    },
    "widgets": [
      { "config": "dolphin-env/dolphin.database.type", "widget": { "type": "combo" } },
      { "config": "dolphin-env/dolphin.database.host", "widget": { "type": "text-field" } },
      { "config": "dolphin-env/dolphin.database.port", "widget": { "type": "text-field", "units": [ { "unit-name": "int" } ] } },
      { "config": "dolphin-env/dolphin.database.username", "widget": { "type": "text-field" } },
      { "config": "dolphin-env/dolphin.database.password", "widget": { "type": "password" } },
      { "config": "dolphin-env/dolphin.user", "widget": { "type": "text-field" } },
      { "config": "dolphin-env/dolphin.group", "widget": { "type": "text-field" } },
      { "config": "dolphin-env/dolphinscheduler-env-content", "widget": { "type": "text-area" } },
      { "config": "dolphin-common/resource.storage.type", "widget": { "type": "combo" } },
      { "config": "dolphin-common/resource.upload.path", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/hdfs.root.user", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/data.store2hdfs.basepath", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/fs.defaultFS", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/fs.s3a.endpoint", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/fs.s3a.access.key", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/fs.s3a.secret.key", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/hadoop.security.authentication.startup.state", "widget": { "type": "toggle" } },
      { "config": "dolphin-common/java.security.krb5.conf.path", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/login.user.keytab.username", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/login.user.keytab.path", "widget": { "type": "text-field" } },
      { "config": "dolphin-common/kerberos.expire.time", "widget": { "type": "text-field" } },
      { "config": "dolphin-alert/enterprise.wechat.enable", "widget": { "type": "toggle" } },
      { "config": "dolphin-alert/enterprise.wechat.corp.id", "widget": { "type": "text-field" } },
      { "config": "dolphin-alert/enterprise.wechat.secret", "widget": { "type": "text-field" } },
      { "config": "dolphin-alert/enterprise.wechat.agent.id", "widget": { "type": "text-field" } },
      { "config": "dolphin-alert/enterprise.wechat.users", "widget": { "type": "text-field" } }
    ]
  }
}
@ -20,7 +20,7 @@
|
||||
<services>
|
||||
<service>
|
||||
<name>DOLPHIN</name>
|
||||
<extends>common-services/DOLPHIN/2.0.0</extends>
|
||||
<extends>common-services/DOLPHIN/1.3.0</extends>
|
||||
</service>
|
||||
</services>
|
||||
</metainfo>
|
@ -1,226 +0,0 @@
|
||||
# Dolphin Scheduler

[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-extend visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing and making the scheduling system available out of the box.

## Introduction

This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.

## Prerequisites

- Kubernetes 1.10+
- PV provisioner support in the underlying infrastructure

## Installing the Chart

To install the chart with the release name `my-release`:
```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler
$ helm install --name dolphinscheduler .
```

These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.

> **Tip**: List all releases using `helm list`
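
You can also override chart parameters at install time; the parameter names come from the [configuration](#configuration) table below. A minimal sketch (the release name and values shown are illustrative):

```bash
# A sketch: override individual parameters with --set at install time.
$ helm install --name dolphinscheduler \
    --set master.replicas=2 \
    --set worker.replicas=3 \
    .
```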

## Uninstalling the Chart

To uninstall/delete the `dolphinscheduler` deployment:

```bash
$ helm delete --purge dolphinscheduler
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.

| Parameter | Description | Default |
| --------- | ----------- | ------- |
| `timezone` | Timezone for all Dolphin Scheduler containers | `Asia/Shanghai` |
| `image.registry` | Docker image registry for Dolphin Scheduler | `docker.io` |
| `image.repository` | Docker image repository for Dolphin Scheduler | `dolphinscheduler` |
| `image.tag` | Docker image version for Dolphin Scheduler | `1.2.1` |
| `image.imagePullPolicy` | Image pull policy. One of `Always`, `Never`, `IfNotPresent` | `IfNotPresent` |
| `imagePullSecrets` | An optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
| | | |
| `postgresql.enabled` | If no external PostgreSQL is configured, Dolphin Scheduler uses an internal PostgreSQL by default | `true` |
| `postgresql.postgresqlUsername` | The username for the internal PostgreSQL | `root` |
| `postgresql.postgresqlPassword` | The password for the internal PostgreSQL | `root` |
| `postgresql.postgresqlDatabase` | The database for the internal PostgreSQL | `dolphinscheduler` |
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for the internal PostgreSQL | `false` |
| `postgresql.persistence.size` | `PersistentVolumeClaim` size | `20Gi` |
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalDatabase.host` | Database host for Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `localhost` |
| `externalDatabase.port` | Database port for Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `5432` |
| `externalDatabase.username` | Database username for Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `root` |
| `externalDatabase.password` | Database password for Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `root` |
| `externalDatabase.database` | Database name for Dolphin Scheduler when an external PostgreSQL exists and `postgresql.enabled` is set to `false` | `dolphinscheduler` |
| | | |
| `zookeeper.enabled` | If no external Zookeeper is configured, Dolphin Scheduler uses an internal Zookeeper by default | `true` |
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for the internal Zookeeper | `false` |
| `zookeeper.persistence.size` | `PersistentVolumeClaim` size | `20Gi` |
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `externalZookeeper.taskQueue` | Task queue for `master` and `worker` when an external Zookeeper exists and `zookeeper.enabled` is set to `false` | `zookeeper` |
| `externalZookeeper.zookeeperQuorum` | Zookeeper quorum when an external Zookeeper exists and `zookeeper.enabled` is set to `false` | `127.0.0.1:2181` |
| | | |
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | The master server only works when the CPU load average is below this value; default: the number of CPU cores * 2 | `100` |
| `master.configmap.MASTER_RESERVED_MEMORY` | The master server only works when available memory is above this reserved value; default: physical memory * 1/10, unit is G | `0.1` |
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` access modes | `[ReadWriteOnce]` |
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` size | `20Gi` |
| | | |
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Number of tasks submitted at a time | `3` |
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | The worker server only works when the CPU load average is below this value; default: the number of CPU cores * 2 | `100` |
| `worker.configmap.WORKER_RESERVED_MEMORY` | The worker server only works when available memory is above this reserved value; default: physical memory * 1/10, unit is G | `0.1` |
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration; please make sure the directory exists and has read/write permissions | `/tmp/dolphinscheduler` |
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration; please read `values.yaml` | `[]` |
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` access modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` size | `20Gi` |
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` access modes | `[ReadWriteOnce]` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` size | `20Gi` |
| | | |
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST` | `nil` |
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` access modes | `[ReadWriteOnce]` |
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` size | `20Gi` |
| | | |
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` access modes | `[ReadWriteOnce]` |
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` access modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
| `ingress.tls.enabled` | Enable ingress tls | `false` |
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |

For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
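
As a sketch of how these parameters are typically supplied together, the following assumes a custom values file; every key mirrors the configuration table above and the concrete values are illustrative, not defaults:

```bash
# A sketch: install with a custom values file instead of repeated --set flags.
$ cat > /tmp/values-override.yaml <<'EOF'
master:
  replicas: 2
worker:
  replicas: 3
ingress:
  enabled: true
  host: dolphinscheduler.example.org
EOF
$ helm install --name dolphinscheduler -f /tmp/values-override.yaml .
```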
233
docker/docker-swarm/docker-compose.yml
Normal file
233
docker/docker-swarm/docker-compose.yml
Normal file
@ -0,0 +1,233 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
|
||||
dolphinscheduler-postgresql:
|
||||
image: bitnami/postgresql:latest
|
||||
container_name: dolphinscheduler-postgresql
|
||||
ports:
|
||||
- 5432:5432
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
volumes:
|
||||
- dolphinscheduler-postgresql:/bitnami/postgresql
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-zookeeper:
|
||||
image: bitnami/zookeeper:latest
|
||||
container_name: dolphinscheduler-zookeeper
|
||||
ports:
|
||||
- 2181:2181
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
ALLOW_ANONYMOUS_LOGIN: "yes"
|
||||
volumes:
|
||||
- dolphinscheduler-zookeeper:/bitnami/zookeeper
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-api:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
container_name: dolphinscheduler-api
|
||||
command: ["api-server"]
|
||||
ports:
|
||||
- 12345:12345
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
- dolphinscheduler-postgresql
|
||||
- dolphinscheduler-zookeeper
|
||||
volumes:
|
||||
- dolphinscheduler-api:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-frontend:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
container_name: dolphinscheduler-frontend
|
||||
command: ["frontend"]
|
||||
ports:
|
||||
- 8888:8888
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
|
||||
FRONTEND_API_SERVER_PORT: 12345
|
||||
healthcheck:
|
||||
test: ["CMD", "nc", "-z", "localhost", "8888"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
- dolphinscheduler-api
|
||||
volumes:
|
||||
- dolphinscheduler-frontend:/var/log/nginx
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-alert:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
container_name: dolphinscheduler-alert
|
||||
command: ["alert-server"]
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
XLS_FILE_PATH: "/tmp/xls"
|
||||
MAIL_SERVER_HOST: ""
|
||||
MAIL_SERVER_PORT: ""
|
||||
MAIL_SENDER: ""
|
||||
MAIL_USER: ""
|
||||
MAIL_PASSWD: ""
|
||||
MAIL_SMTP_STARTTLS_ENABLE: "false"
|
||||
MAIL_SMTP_SSL_ENABLE: "false"
|
||||
MAIL_SMTP_SSL_TRUST: ""
|
||||
ENTERPRISE_WECHAT_ENABLE: "false"
|
||||
ENTERPRISE_WECHAT_CORP_ID: ""
|
||||
ENTERPRISE_WECHAT_SECRET: ""
|
||||
ENTERPRISE_WECHAT_AGENT_ID: ""
|
||||
ENTERPRISE_WECHAT_USERS: ""
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
- dolphinscheduler-postgresql
|
||||
volumes:
|
||||
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-master:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
container_name: dolphinscheduler-master
|
||||
command: ["master-server"]
|
||||
ports:
|
||||
- 5678:5678
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
MASTER_EXEC_THREADS: "100"
|
||||
MASTER_EXEC_TASK_NUM: "20"
|
||||
MASTER_HEARTBEAT_INTERVAL: "10"
|
||||
MASTER_TASK_COMMIT_RETRYTIMES: "5"
|
||||
MASTER_TASK_COMMIT_INTERVAL: "1000"
|
||||
MASTER_MAX_CPULOAD_AVG: "100"
|
||||
MASTER_RESERVED_MEMORY: "0.1"
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
- dolphinscheduler-postgresql
|
||||
- dolphinscheduler-zookeeper
|
||||
volumes:
|
||||
- dolphinscheduler-master:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-worker:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
container_name: dolphinscheduler-worker
|
||||
command: ["worker-server"]
|
||||
ports:
|
||||
- 1234:1234
|
||||
- 50051:50051
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
WORKER_EXEC_THREADS: "100"
|
||||
WORKER_HEARTBEAT_INTERVAL: "10"
|
||||
WORKER_FETCH_TASK_NUM: "3"
|
||||
WORKER_MAX_CPULOAD_AVG: "100"
|
||||
WORKER_RESERVED_MEMORY: "0.1"
|
||||
WORKER_GROUP: "default"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
- dolphinscheduler-postgresql
|
||||
- dolphinscheduler-zookeeper
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./dolphinscheduler_env.sh
|
||||
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
|
||||
- type: volume
|
||||
source: dolphinscheduler-worker-data
|
||||
target: /tmp/dolphinscheduler
|
||||
- type: volume
|
||||
source: dolphinscheduler-worker-logs
|
||||
target: /opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
networks:
|
||||
dolphinscheduler:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
dolphinscheduler-postgresql:
|
||||
dolphinscheduler-zookeeper:
|
||||
dolphinscheduler-api:
|
||||
dolphinscheduler-frontend:
|
||||
dolphinscheduler-alert:
|
||||
dolphinscheduler-master:
|
||||
dolphinscheduler-worker-data:
|
||||
dolphinscheduler-worker-logs:
|
||||
|
||||
configs:
|
||||
dolphinscheduler-worker-task-env:
|
||||
file: ./dolphinscheduler_env.sh
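
The compose file above wires each server to the shared PostgreSQL and Zookeeper containers and bind-mounts `dolphinscheduler_env.sh` into the worker. A minimal sketch of bringing the stack up and checking it, assuming the commands run from `docker/docker-swarm`:

```bash
# A sketch: start the stack in the background, then check container health.
$ docker-compose up -d
$ docker-compose ps
# The API server is published on port 12345 and the frontend on port 8888.
$ curl http://localhost:8888
```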
|
230
docker/docker-swarm/docker-stack.yml
Normal file
230
docker/docker-swarm/docker-stack.yml
Normal file
@ -0,0 +1,230 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
version: "3.4"
|
||||
|
||||
services:
|
||||
|
||||
dolphinscheduler-postgresql:
|
||||
image: bitnami/postgresql:latest
|
||||
ports:
|
||||
- 5432:5432
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
volumes:
|
||||
- dolphinscheduler-postgresql:/bitnami/postgresql
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
dolphinscheduler-zookeeper:
|
||||
image: bitnami/zookeeper:latest
|
||||
ports:
|
||||
- 2181:2181
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
ALLOW_ANONYMOUS_LOGIN: "yes"
|
||||
volumes:
|
||||
- dolphinscheduler-zookeeper:/bitnami/zookeeper
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
dolphinscheduler-api:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
command: ["api-server"]
|
||||
ports:
|
||||
- 12345:12345
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- dolphinscheduler-api:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
dolphinscheduler-frontend:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
command: ["frontend"]
|
||||
ports:
|
||||
- 8888:8888
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
|
||||
FRONTEND_API_SERVER_PORT: 12345
|
||||
healthcheck:
|
||||
test: ["CMD", "nc", "-z", "localhost", "8888"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- dolphinscheduler-frontend:/var/log/nginx
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
dolphinscheduler-alert:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
command: ["alert-server"]
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
XLS_FILE_PATH: "/tmp/xls"
|
||||
MAIL_SERVER_HOST: ""
|
||||
MAIL_SERVER_PORT: ""
|
||||
MAIL_SENDER: ""
|
||||
MAIL_USER: ""
|
||||
MAIL_PASSWD: ""
|
||||
MAIL_SMTP_STARTTLS_ENABLE: "false"
|
||||
MAIL_SMTP_SSL_ENABLE: "false"
|
||||
MAIL_SMTP_SSL_TRUST: ""
|
||||
ENTERPRISE_WECHAT_ENABLE: "false"
|
||||
ENTERPRISE_WECHAT_CORP_ID: ""
|
||||
ENTERPRISE_WECHAT_SECRET: ""
|
||||
ENTERPRISE_WECHAT_AGENT_ID: ""
|
||||
ENTERPRISE_WECHAT_USERS: ""
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- dolphinscheduler-alert:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
dolphinscheduler-master:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
command: ["master-server"]
|
||||
ports:
|
||||
- 5678:5678
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
MASTER_EXEC_THREADS: "100"
|
||||
MASTER_EXEC_TASK_NUM: "20"
|
||||
MASTER_HEARTBEAT_INTERVAL: "10"
|
||||
MASTER_TASK_COMMIT_RETRYTIMES: "5"
|
||||
MASTER_TASK_COMMIT_INTERVAL: "1000"
|
||||
MASTER_MAX_CPULOAD_AVG: "100"
|
||||
MASTER_RESERVED_MEMORY: "0.1"
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- dolphinscheduler-master:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
dolphinscheduler-worker:
|
||||
image: registry.cn-qingdao.aliyuncs.com/sxyj/dolphinscheduler:dev
|
||||
command: ["worker-server"]
|
||||
ports:
|
||||
- 1234:1234
|
||||
- 50051:50051
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
WORKER_EXEC_THREADS: "100"
|
||||
WORKER_HEARTBEAT_INTERVAL: "10"
|
||||
WORKER_FETCH_TASK_NUM: "3"
|
||||
WORKER_MAX_CPULOAD_AVG: "100"
|
||||
WORKER_RESERVED_MEMORY: "0.1"
|
||||
WORKER_GROUP: "default"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
|
||||
POSTGRESQL_HOST: dolphinscheduler-postgresql
|
||||
POSTGRESQL_PORT: 5432
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
|
||||
- dolphinscheduler-worker-logs:/opt/dolphinscheduler/logs
|
||||
configs:
|
||||
- source: dolphinscheduler-worker-task-env
|
||||
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
networks:
|
||||
dolphinscheduler:
|
||||
driver: overlay
|
||||
|
||||
volumes:
|
||||
dolphinscheduler-postgresql:
|
||||
dolphinscheduler-zookeeper:
|
||||
dolphinscheduler-api:
|
||||
dolphinscheduler-frontend:
|
||||
dolphinscheduler-alert:
|
||||
dolphinscheduler-master:
|
||||
dolphinscheduler-worker-data:
|
||||
dolphinscheduler-worker-logs:
|
||||
|
||||
configs:
|
||||
dolphinscheduler-worker-task-env:
|
||||
file: ./dolphinscheduler_env.sh
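
`docker-stack.yml` above is the Swarm variant of the same topology: an overlay network, `deploy` blocks, and `dolphinscheduler_env.sh` distributed as a config. A minimal deployment sketch, assuming a single-node swarm:

```bash
# A sketch: initialize a single-node swarm and deploy the stack defined above.
$ docker swarm init
$ docker stack deploy -c docker-stack.yml dolphinscheduler
# List the services and their replica state.
$ docker stack services dolphinscheduler
```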
|
20
docker/docker-swarm/dolphinscheduler_env.sh
Normal file
20
docker/docker-swarm/dolphinscheduler_env.sh
Normal file
@ -0,0 +1,20 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
export PYTHON_HOME=/usr/bin/python2
|
||||
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
|
||||
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
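
This `dolphinscheduler_env.sh` is the task runtime environment the worker sources before executing tasks; it is bind-mounted in `docker-compose.yml` and distributed as a config in `docker-stack.yml` above. A hedged sketch of verifying it inside a running worker (container and path names are taken from the compose file; the version check is illustrative):

```bash
# A sketch: confirm the env file is in place and points at a real interpreter.
$ docker-compose exec dolphinscheduler-worker sh -c \
    '. /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && $PYTHON_HOME --version'
```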
|
@ -21,8 +21,8 @@ description: Dolphin Scheduler is a distributed and easy-to-expand visual DAG wo
|
||||
home: https://dolphinscheduler.apache.org
|
||||
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
|
||||
keywords:
|
||||
- dolphinscheduler
|
||||
- Scheduler
|
||||
- dolphinscheduler
|
||||
- Scheduler
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
@ -35,18 +35,18 @@ type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 0.1.0
|
||||
version: 1.0.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application.
|
||||
appVersion: 1.2.1
|
||||
appVersion: 1.3.0
|
||||
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
@ -16,7 +16,9 @@ To install the chart with the release name `my-release`:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
|
||||
$ cd incubator-dolphinscheduler
|
||||
$ cd incubator-dolphinscheduler/kubernetes/dolphinscheduler
|
||||
$ helm repo add bitnami https://charts.bitnami.com/bitnami
|
||||
$ helm dependency update .
|
||||
$ helm install --name dolphinscheduler .
|
||||
```
|
||||
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
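
If the install succeeds, the release and its pods can be inspected with standard tooling; a short sketch using the release name from the commands above:

```bash
# A sketch: check the Helm release and the pods it created.
$ helm status dolphinscheduler
$ kubectl get pods
```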
|
25
docker/kubernetes/dolphinscheduler/requirements.yaml
Normal file
25
docker/kubernetes/dolphinscheduler/requirements.yaml
Normal file
@ -0,0 +1,25 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
@ -130,20 +130,4 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
|
||||
{{- define "dolphinscheduler.worker.base.dir" -}}
|
||||
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
|
||||
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default dolphinscheduler worker data download dir.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.worker.data.download.dir" -}}
|
||||
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default dolphinscheduler worker process exec dir.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.worker.process.exec.dir" -}}
|
||||
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}}
|
||||
{{- end -}}
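
The three helpers above derive the worker's download and exec directories from a single base path, so a `DOLPHINSCHEDULER_DATA_BASEDIR_PATH` of `/tmp/dolphinscheduler` yields `/tmp/dolphinscheduler/download` and `/tmp/dolphinscheduler/exec`. A sketch of checking the rendered values without installing, run from the chart directory (the grep pattern is illustrative):

```bash
# A sketch: render the chart locally and inspect the derived worker paths.
$ helm template . | grep -E 'DOLPHINSCHEDULER_(DATA|PROCESS)'
```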
|
@ -31,4 +31,6 @@ data:
|
||||
MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }}
|
||||
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
|
||||
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
|
||||
MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }}
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
|
||||
{{- end }}
|
@ -29,9 +29,9 @@ data:
|
||||
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
|
||||
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
|
||||
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
|
||||
WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }}
|
||||
WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }}
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
|
||||
DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }}
|
||||
DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }}
|
||||
dolphinscheduler_env.sh: |-
|
||||
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
|
||||
{{ . }}
|
@ -166,19 +166,19 @@ spec:
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_USERNAME
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlUsername }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.username | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
@ -188,14 +188,20 @@ spec:
|
||||
{{- else }}
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_DATABASE
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlDatabase }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.database | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.alert.livenessProbe.enabled }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
- AlertServer
|
||||
initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }}
|
||||
@ -208,7 +214,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
- AlertServer
|
||||
initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }}
|
@ -99,19 +99,19 @@ spec:
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_USERNAME
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlUsername }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.username | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
@ -122,6 +122,12 @@ spec:
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_DATABASE
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlDatabase }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.database | quote }}
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
|
@ -109,8 +109,8 @@ spec:
|
||||
args:
|
||||
- "master-server"
|
||||
ports:
|
||||
- containerPort: 8888
|
||||
name: unused-tcp-port
|
||||
- containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }}
|
||||
name: "master-port"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: TZ
|
||||
@ -150,6 +150,16 @@ spec:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_RESERVED_MEMORY
|
||||
- name: MASTER_LISTEN_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_LISTEN_PORT
|
||||
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
@ -178,11 +188,11 @@ spec:
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
- name: TASK_QUEUE
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: {{ .Values.zookeeper.taskQueue }}
|
||||
- name: POSTGRESQL_DATABASE
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlDatabase }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.taskQueue }}
|
||||
value: {{ .Values.externalDatabase.database | quote }}
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
@ -196,7 +206,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- master-server
|
||||
- MasterServer
|
||||
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
|
||||
@ -209,7 +219,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- master-server
|
||||
- MasterServer
|
||||
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
|
@ -109,6 +109,8 @@ spec:
|
||||
args:
|
||||
- "worker-server"
|
||||
ports:
|
||||
- containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }}
|
||||
name: "worker-port"
|
||||
- containerPort: 50051
|
||||
name: "logs-port"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
@ -140,6 +142,21 @@ spec:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_RESERVED_MEMORY
|
||||
- name: WORKER_LISTEN_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_LISTEN_PORT
|
||||
- name: WORKER_GROUP
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_GROUP
|
||||
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
@ -167,12 +184,12 @@ spec:
|
||||
{{- else }}
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
- name: TASK_QUEUE
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: {{ .Values.zookeeper.taskQueue }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_DATABASE
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlDatabase }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.taskQueue }}
|
||||
value: {{ .Values.externalDatabase.database | quote }}
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
@ -186,7 +203,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
- WorkerServer
|
||||
initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }}
|
||||
@ -199,7 +216,7 @@ spec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
- WorkerServer
|
||||
initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }}
|
||||
@ -247,7 +264,7 @@ spec:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
|
||||
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
|
||||
@ -264,7 +281,7 @@ spec:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
|
||||
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
|
@ -25,10 +25,10 @@ metadata:
|
||||
spec:
|
||||
clusterIP: "None"
|
||||
ports:
|
||||
- port: 8888
|
||||
targetPort: tcp-port
|
||||
- port: {{ .Values.master.configmap.MASTER_LISTEN_PORT }}
|
||||
targetPort: master-port
|
||||
protocol: TCP
|
||||
name: unused-tcp-port
|
||||
name: master-port
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
@ -25,6 +25,10 @@ metadata:
|
||||
spec:
|
||||
clusterIP: "None"
|
||||
ports:
|
||||
- port: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }}
|
||||
targetPort: worker-port
|
||||
protocol: TCP
|
||||
name: worker-port
|
||||
- port: 50051
|
||||
targetPort: logs-port
|
||||
protocol: TCP
|
@ -27,7 +27,7 @@ timezone: "Asia/Shanghai"
|
||||
image:
|
||||
registry: "docker.io"
|
||||
repository: "dolphinscheduler"
|
||||
tag: "1.2.1"
|
||||
tag: "1.3.0"
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
imagePullSecrets: []
|
||||
@ -56,6 +56,8 @@ externalDatabase:
|
||||
zookeeper:
|
||||
enabled: true
|
||||
taskQueue: "zookeeper"
|
||||
service:
|
||||
port: "2181"
|
||||
persistence:
|
||||
enabled: false
|
||||
size: "20Gi"
|
||||
@ -91,6 +93,7 @@ master:
|
||||
MASTER_TASK_COMMIT_INTERVAL: "1000"
|
||||
MASTER_MAX_CPULOAD_AVG: "100"
|
||||
MASTER_RESERVED_MEMORY: "0.1"
|
||||
MASTER_LISTEN_PORT: "5678"
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
@ -156,6 +159,8 @@ worker:
|
||||
WORKER_FETCH_TASK_NUM: "3"
|
||||
WORKER_MAX_CPULOAD_AVG: "100"
|
||||
WORKER_RESERVED_MEMORY: "0.1"
|
||||
WORKER_LISTEN_PORT: "1234"
|
||||
WORKER_GROUP: "default"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
|
||||
DOLPHINSCHEDULER_ENV:
|
||||
- "export HADOOP_HOME=/opt/soft/hadoop"
|
@ -27,7 +27,7 @@ ENV DEBIAN_FRONTEND noninteractive
|
||||
# If installation is slow, you can replace alpine's mirror with aliyun's mirror, for example:
|
||||
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
|
||||
RUN apk update && \
|
||||
apk add dos2unix shadow bash openrc python sudo vim wget iputils net-tools openssh-server py2-pip tini && \
|
||||
apk add dos2unix shadow bash openrc python python3 sudo vim wget iputils net-tools openssh-server py2-pip tini && \
|
||||
apk add --update procps && \
|
||||
openrc boot && \
|
||||
pip install kazoo
|
||||
@ -67,6 +67,7 @@ ADD ./checkpoint.sh /root/checkpoint.sh
|
||||
ADD ./startup-init-conf.sh /root/startup-init-conf.sh
|
||||
ADD ./startup.sh /root/startup.sh
|
||||
ADD ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
|
||||
ADD ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/
|
||||
ADD conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
|
||||
RUN chmod +x /root/checkpoint.sh && \
|
||||
chmod +x /root/startup-init-conf.sh && \
|
||||
|
@ -15,12 +15,6 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
export HADOOP_HOME=/opt/soft/hadoop
|
||||
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
|
||||
export SPARK_HOME1=/opt/soft/spark1
|
||||
export SPARK_HOME2=/opt/soft/spark2
|
||||
export PYTHON_HOME=/opt/soft/python
|
||||
export JAVA_HOME=/opt/soft/java
|
||||
export HIVE_HOME=/opt/soft/hive
|
||||
export FLINK_HOME=/opt/soft/flink
|
||||
export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH
|
||||
export PYTHON_HOME=/usr/bin/python2
|
||||
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
|
||||
export PATH=$PYTHON_HOME/bin:$JAVA_HOME/bin:$PATH
|
||||
|
52
dockerfile/conf/dolphinscheduler/logback/logback-alert.xml
Normal file
@ -0,0 +1,52 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-alert.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>20</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="ALERTLOGFILE"/>
</root>
</configuration>

62
dockerfile/conf/dolphinscheduler/logback/logback-api.xml
Normal file
@ -0,0 +1,62 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config start -->
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-api-server.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config end -->
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
</root>
</configuration>
82
dockerfile/conf/dolphinscheduler/logback/logback-master.xml
Normal file
@ -0,0 +1,82 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<!-- master server logback config start -->
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-master.log</file>
<!--<filter class="org.apache.dolphinscheduler.server.log.MasterLogFilter">
<level>INFO</level>
</filter>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- master server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="MASTERLOGFILE"/>
</root>
</configuration>
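
The sifting appender above gives every task instance its own log file, keyed by taskAppId (the key extraction itself is done by the project's TaskLogDiscriminator). As a minimal sketch, application code only needs to log through plain SLF4J and the configuration takes care of routing; the class name below is hypothetical:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TaskLogExample {
    // output matched by TaskLogFilter is split per task by the discriminator
    private static final Logger logger = LoggerFactory.getLogger(TaskLogExample.class);

    public static void main(String[] args) {
        logger.info("goes to STDOUT and, for task loggers, to logs/<taskAppId>.log");
    }
}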
@ -1,4 +1,4 @@
<?xml version="1.0" encoding="UTF-8" ?>
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with

@ -17,7 +17,8 @@
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds">
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>

@ -27,11 +28,15 @@
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- worker server logback config start -->
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"></filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>

@ -41,7 +46,7 @@
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>

@ -49,31 +54,30 @@
</appender>
</sift>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-worker.log</file>
<filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter"/>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- worker server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>

@ -25,7 +25,9 @@ DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start postgresql
initPostgreSQL() {
echo "checking postgresql"
if [ -n "$(ifconfig | grep ${POSTGRESQL_HOST})" ]; then
if [[ "${POSTGRESQL_HOST}" = "127.0.0.1" || "${POSTGRESQL_HOST}" = "localhost" ]]; then
export PGPORT=${POSTGRESQL_PORT}
echo "start postgresql service"
rc-service postgresql restart

@ -47,10 +49,21 @@ initPostgreSQL() {
sudo -u postgres psql -tAc "grant all privileges on database dolphinscheduler to ${POSTGRESQL_USERNAME}"
fi
echo "test postgresql service"
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to postgresql."
exit 1
fi
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
sleep 5
done
echo "connect postgresql service"
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1")
v=$(sudo -u postgres PGPASSWORD=${POSTGRESQL_PASSWORD} psql -h ${POSTGRESQL_HOST} -p ${POSTGRESQL_PORT} -U ${POSTGRESQL_USERNAME} -d dolphinscheduler -tAc "select 1")
if [ "$(echo "${v}" | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Can't connect to database...${v}"
echo "Error: Can't connect to database...${v}"
exit 1
fi

@ -70,10 +83,10 @@ initZK() {
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
log "Error: Couldn't connect to zookeeper."
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
log "Trying to connect to zookeeper at ${line}. Attempt $counter."
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
done
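
Both wait loops above follow the same pattern: probe the TCP port, give up after 30 attempts, sleep five seconds between tries. A rough Java equivalent of that probe loop, with host, port, and limits as illustrative values only:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class PortWaitExample {
    // returns once host:port accepts connections, mirroring the `nc -z` retry loop
    static void waitForPort(String host, int port) throws InterruptedException {
        int counter = 0;
        while (true) {
            try (Socket socket = new Socket()) {
                socket.connect(new InetSocketAddress(host, port), 1000);
                return; // port is open
            } catch (IOException e) {
                if (++counter == 30) {
                    throw new IllegalStateException("Error: Couldn't connect to " + host + ":" + port);
                }
                System.out.println("Trying to connect to " + host + ":" + port + ". Attempt " + counter + ".");
                Thread.sleep(5000);
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        waitForPort("127.0.0.1", 2181); // e.g. the zookeeper service port
    }
}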

@ -21,6 +21,7 @@ import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;

@ -54,11 +55,19 @@ public class EnterpriseWeChatUtilsTest {
private static final String enterpriseWechatUsers="LiGang,journey";
private static final String msg = "hello world";
private static final String enterpriseWechatTeamSendMsg = "{\\\"toparty\\\":\\\"$toParty\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"text\\\",\\\"text\\\":{\\\"content\\\":\\\"$msg\\\"},\\\"safe\\\":\\\"0\\\"}";
private static final String enterpriseWechatUserSendMsg = "{\\\"touser\\\":\\\"$toUser\\\",\\\"agentid\\\":\\\"$agentId\\\",\\\"msgtype\\\":\\\"markdown\\\",\\\"markdown\\\":{\\\"content\\\":\\\"$msg\\\"}}";
@Before
public void init(){
PowerMockito.mockStatic(PropertyUtils.class);
Mockito.when(PropertyUtils.getBoolean(Constants.ENTERPRISE_WECHAT_ENABLE)).thenReturn(true);
Mockito.when(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_USER_SEND_MSG)).thenReturn(enterpriseWechatUserSendMsg);
Mockito.when(PropertyUtils.getString(Constants.ENTERPRISE_WECHAT_TEAM_SEND_MSG)).thenReturn(enterpriseWechatTeamSendMsg);
}
@Test
public void testIsEnable(){
PowerMockito.mockStatic(PropertyUtils.class);
Mockito.when(PropertyUtils.getBoolean(Constants.ENTERPRISE_WECHAT_ENABLE)).thenReturn(true);
Boolean weChartEnable = EnterpriseWeChatUtils.isEnable();
Assert.assertTrue(weChartEnable);
}

@ -88,6 +97,7 @@ public class EnterpriseWeChatUtilsTest {
@Test
public void tesMakeUserSendMsg1(){
String sendMsg = EnterpriseWeChatUtils.makeUserSendMsg(enterpriseWechatUsers, enterpriseWechatAgentId, msg);
Assert.assertTrue(sendMsg.contains(enterpriseWechatUsers));
Assert.assertTrue(sendMsg.contains(enterpriseWechatAgentId));

@ -1,67 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For unit test
#alert type is EMAIL/SMS
alert.type=EMAIL
# mail server configuration
mail.protocol=SMTP
mail.server.host=xxx.xxx.test
mail.server.port=25
mail.sender=xxx@xxx.com
mail.user=xxx@xxx.com
mail.passwd=111111
# Test double
test.server.factor=3.0
# Test NumberFormat
test.server.testnumber=abc
# Test array
test.server.list=xxx.xxx.test1,xxx.xxx.test2,xxx.xxx.test3
# Test enum
test.server.enum1=MASTER
test.server.enum2=DEAD_SERVER
test.server.enum3=abc
# TLS
mail.smtp.starttls.enable=true
# SSL
mail.smtp.ssl.enable=false
mail.smtp.ssl.trust=xxx.xxx.com
#xls file path,need create if not exist
xls.file.path=/tmp/xls
# Enterprise WeChat configuration
enterprise.wechat.enable=false
enterprise.wechat.corp.id=xxxxxxx
enterprise.wechat.secret=xxxxxxx
enterprise.wechat.agent.id=xxxxxxx
enterprise.wechat.users=xxxxxxx
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}

@ -94,6 +94,30 @@ public class ProcessDefinitionController extends BaseController {
return returnDataList(result);
}
/**
* copy process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return copy result code
*/
@ApiOperation(value = "copyProcessDefinition", notes= "COPY_PROCESS_DEFINITION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "processId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/copy")
@ResponseStatus(HttpStatus.OK)
@ApiException(COPY_PROCESS_DEFINITION_ERROR)
public Result copyProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
@RequestParam(value = "processId", required = true) int processId) throws JsonProcessingException {
logger.info("copy process definition, login user:{}, project name:{}, process definition id:{}",
loginUser.getUserName(), projectName, processId);
Map<String, Object> result = processDefinitionService.copyProcessDefinition(loginUser, projectName, processId);
return returnDataList(result);
}
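
For reference, a rough client-side sketch of calling this new endpoint with Java 11's built-in HttpClient; the host, route prefix, and session header are assumptions for illustration, not taken from this commit:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CopyProcessDefinitionClient {
    public static void main(String[] args) throws Exception {
        // hypothetical host and route prefix; adjust to the actual API server deployment
        String url = "http://localhost:12345/dolphinscheduler/projects/test/process/copy";
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(url + "?processId=100"))    // processId passed as a request parameter
                .header("sessionId", "<session-token>")     // assumed session header
                .POST(HttpRequest.BodyPublishers.noBody())  // the endpoint is a POST with no body
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body());                // expect a Result JSON, code 0 on success
    }
}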
/**
* verify process definition name unique
*

@ -168,15 +168,13 @@ public enum Status {
PREVIEW_SCHEDULE_ERROR(10139,"preview schedule error", "预览调度配置错误"),
PARSE_TO_CRON_EXPRESSION_ERROR(10140,"parse cron to cron expression error", "解析调度表达式错误"),
SCHEDULE_START_TIME_END_TIME_SAME(10141,"The start time must not be the same as the end", "开始时间不能和结束时间一样"),
DELETE_TENANT_BY_ID_FAIL(100142,"delete tenant by id fail, for there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"),
DELETE_TENANT_BY_ID_FAIL_DEFINES(100143,"delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"),
DELETE_TENANT_BY_ID_FAIL_USERS(100144,"delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"),
DELETE_WORKER_GROUP_BY_ID_FAIL(100145,"delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"),
QUERY_WORKER_GROUP_FAIL(100146,"query worker group fail ", "查询worker分组失败"),
DELETE_WORKER_GROUP_FAIL(100147,"delete worker group fail ", "删除worker分组失败"),
DELETE_TENANT_BY_ID_FAIL(10142,"delete tenant by id fail, for there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"),
DELETE_TENANT_BY_ID_FAIL_DEFINES(10143,"delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"),
DELETE_TENANT_BY_ID_FAIL_USERS(10144,"delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"),
DELETE_WORKER_GROUP_BY_ID_FAIL(10145,"delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"),
QUERY_WORKER_GROUP_FAIL(10146,"query worker group fail ", "查询worker分组失败"),
DELETE_WORKER_GROUP_FAIL(10147,"delete worker group fail ", "删除worker分组失败"),
COPY_PROCESS_DEFINITION_ERROR(10148,"copy process definition error", "复制工作流错误"),
UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"),
UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"),

@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.exceptions;
import org.apache.dolphinscheduler.api.enums.Status;
/**
* service exception
*/
public class ServiceException extends RuntimeException {
/**
* code
*/
private Integer code;
public ServiceException() {
}
public ServiceException(Status status) {
super(status.getMsg());
this.code = status.getCode();
}
public ServiceException(Integer code,String message) {
super(message);
this.code = code;
}
public ServiceException(String message) {
super(message);
}
public Integer getCode() {
return this.code;
}
public void setCode(Integer code) {
this.code = code;
}
}
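
A minimal usage sketch for the new exception type (the throwing call is hypothetical; Status.HDFS_OPERATION_ERROR is used this way later in this commit): callers can map the exception straight back to an API error code and message:

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;

public class ServiceExceptionExample {
    public static void main(String[] args) {
        try {
            // hypothetical service call signalling an HDFS failure
            throw new ServiceException(Status.HDFS_OPERATION_ERROR);
        } catch (ServiceException e) {
            // code and message come straight from the Status enum entry
            System.out.println(e.getCode() + ": " + e.getMessage());
        }
    }
}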

@ -65,25 +65,24 @@ public class LoggerService {
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null){
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
}
String host = Host.of(taskInstance.getHost()).getIp();
if(StringUtils.isEmpty(host)){
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
}
String host = getHost(taskInstance.getHost());
Result result = new Result(Status.SUCCESS.getCode(), Status.SUCCESS.getMsg());
logger.info("log host : {} , logPath : {} , logServer port : {}",host,taskInstance.getLogPath(),Constants.RPC_PORT);
String log = logClient.rollViewLog(host, Constants.RPC_PORT, taskInstance.getLogPath(),skipLineNum,limit);
result.setData(log);
logger.info(log);
return result;
}
/**
* get log size
*

@ -92,10 +91,24 @@ public class LoggerService {
*/
public byte[] getLogBytes(int taskInstId) {
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstId);
if (taskInstance == null){
throw new RuntimeException("task instance is null");
if (taskInstance == null || StringUtils.isBlank(taskInstance.getHost())){
throw new RuntimeException("task instance is null or host is null");
}
String host = Host.of(taskInstance.getHost()).getIp();
String host = getHost(taskInstance.getHost());
return logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath());
}
/**
* get host
* @param address address
* @return the host ip, or the raw address if it is an old-version host
*/
private String getHost(String address){
if (Host.isOldVersion(address)){
return address;
}
return Host.of(address).getIp();
}
}

@ -112,8 +112,13 @@ public class ProcessDefinitionService extends BaseDAGService {
* @return create result code
* @throws JsonProcessingException JsonProcessingException
*/
public Map<String, Object> createProcessDefinition(User loginUser, String projectName, String name,
String processDefinitionJson, String desc, String locations, String connects) throws JsonProcessingException {
public Map<String, Object> createProcessDefinition(User loginUser,
String projectName,
String name,
String processDefinitionJson,
String desc,
String locations,
String connects) throws JsonProcessingException {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);

@ -281,6 +286,41 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
/**
* copy process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return copy result code
*/
public Map<String, Object> copyProcessDefinition(User loginUser, String projectName, Integer processId) throws JsonProcessingException{
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
return result;
} else {
return createProcessDefinition(
loginUser,
projectName,
processDefinition.getName()+"_copy_"+System.currentTimeMillis(),
processDefinition.getProcessDefinitionJson(),
processDefinition.getDescription(),
processDefinition.getLocations(),
processDefinition.getConnects());
}
}
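
A small sketch of the naming rule used above: the copy keeps the original definition name and appends a _copy_ suffix plus the current epoch millis, which makes repeated copies of the same definition effectively unique:

public class CopyNameExample {
    // mirrors processDefinition.getName() + "_copy_" + System.currentTimeMillis()
    static String copyName(String originalName) {
        return originalName + "_copy_" + System.currentTimeMillis();
    }

    public static void main(String[] args) {
        System.out.println(copyName("test_pdf")); // e.g. test_pdf_copy_1590000000000
    }
}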
/**
* update process definition
*

@ -26,6 +26,7 @@ import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;

@ -44,8 +45,10 @@ import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;

@ -234,9 +237,6 @@ public class ResourcesService extends BaseService {
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {

@ -317,7 +317,6 @@ public class ResourcesService extends BaseService {
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;

@ -325,9 +324,10 @@ public class ResourcesService extends BaseService {
//check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!resource.getAlias().equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;

@ -338,11 +338,24 @@ public class ResourcesService extends BaseService {
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
String nameWithSuffix = name;
String originResourceName = resource.getAlias();
if (!resource.isDirectory()) {
//get the file suffix
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
//if the name has no suffix then add it, else use the origin name

@ -352,7 +365,7 @@ public class ResourcesService extends BaseService {
}
// updateResource data
List<Integer> childrenResource = listAllChildren(resource);
List<Integer> childrenResource = listAllChildren(resource,false);
String oldFullName = resource.getFullName();
Date now = new Date();

@ -364,10 +377,11 @@ public class ResourcesService extends BaseService {
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, fullName));
t.setFullName(t.getFullName().replaceFirst(oldFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());

@ -385,29 +399,24 @@ public class ResourcesService extends BaseService {
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new RuntimeException(Status.UPDATE_RESOURCE_ERROR.getMsg());
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name)) {
return result;
}
// get file hdfs path
// delete hdfs file by type
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
if (HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.info("hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} else {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
}
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;

@ -542,34 +551,6 @@ public class ResourcesService extends BaseService {
return result;
}
/**
* get all resources
* @param loginUser login user
* @return all resource set
*/
/*private Set<Resource> getAllResources(User loginUser, ResourceType type) {
int userId = loginUser.getId();
boolean listChildren = true;
if(isAdmin(loginUser)){
userId = 0;
listChildren = false;
}
List<Resource> resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
Set<Resource> allResourceList = new HashSet<>(resourceList);
if (listChildren) {
Set<Integer> authorizedIds = new HashSet<>();
List<Resource> authorizedDirecoty = resourceList.stream().filter(t->t.getUserId() != loginUser.getId() && t.isDirectory()).collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(authorizedDirecoty)) {
for(Resource resource : authorizedDirecoty){
authorizedIds.addAll(listAllChildren(resource));
}
List<Resource> childrenResources = resourcesMapper.listResourceByIds(authorizedIds.toArray(new Integer[authorizedIds.size()]));
allResourceList.addAll(childrenResources);
}
}
return allResourceList;
}*/
/**
* query resource list
*

@ -580,8 +561,11 @@ public class ResourcesService extends BaseService {
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(loginUser.getId(), type.ordinal(),0);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());

@ -631,7 +615,7 @@ public class ResourcesService extends BaseService {
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource);
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
//if resource type is UDF, check whether it is bound by a UDF function

@ -1193,12 +1177,13 @@ public class ResourcesService extends BaseService {
/**
* list all children id
* @param resource resource
* @param resource resource
* @param containSelf whether to add self to the children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource){
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1) {
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
@ -173,6 +173,7 @@ PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCESS_DEFINITION_NOTES=release process definition
QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id
COPY_PROCESS_DEFINITION_NOTES=copy process definition
QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list
QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list

@ -173,6 +173,7 @@ PROCESS_DEFINITION_ID=process definition id
PROCESS_DEFINITION_IDS=process definition ids
RELEASE_PROCESS_DEFINITION_NOTES=release process definition
QUERY_PROCESS_DEFINITION_BY_ID_NOTES=query process definition by id
COPY_PROCESS_DEFINITION_NOTES=copy process definition
QUERY_PROCESS_DEFINITION_LIST_NOTES=query process definition list
QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=query process definition list paging
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list

@ -171,6 +171,7 @@ UPDATE_PROCESS_DEFINITION_NOTES=更新流程定义
PROCESS_DEFINITION_ID=流程定义ID
RELEASE_PROCESS_DEFINITION_NOTES=发布流程定义
QUERY_PROCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
COPY_PROCESS_DEFINITION_NOTES=复制流程定义
QUERY_PROCESS_DEFINITION_LIST_NOTES=查询流程定义列表
QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义

@ -174,6 +174,21 @@ public class ProcessDefinitionControllerTest{
Assert.assertEquals(Status.SUCCESS.getCode(),response.getCode().intValue());
}
@Test
public void testCopyProcessDefinition() throws Exception {
String projectName = "test";
int id = 1;
Map<String, Object> result = new HashMap<>(5);
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.copyProcessDefinition(user, projectName,id)).thenReturn(result);
Result response = processDefinitionController.copyProcessDefinition(user, projectName,id);
Assert.assertEquals(Status.SUCCESS.getCode(),response.getCode().intValue());
}
@Test
public void testQueryProcessDefinitionList() throws Exception {

@ -0,0 +1,46 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.exceptions;
import org.apache.dolphinscheduler.api.enums.Status;
import org.junit.Assert;
import org.junit.Test;
public class ServiceExceptionTest {
@Test
public void getCodeTest(){
ServiceException serviceException = new ServiceException();
Assert.assertNull(serviceException.getCode());
serviceException = new ServiceException(Status.ALERT_GROUP_EXIST);
Assert.assertNotNull(serviceException.getCode());
serviceException = new ServiceException(10012, "alarm group already exists");
Assert.assertNotNull(serviceException.getCode());
}
@Test
public void getMessageTest(){
ServiceException serviceException = new ServiceException();
Assert.assertNull(serviceException.getMessage());
serviceException = new ServiceException(Status.ALERT_GROUP_EXIST);
Assert.assertNotNull(serviceException.getMessage());
serviceException = new ServiceException(10012, "alarm group already exists");
Assert.assertNotNull(serviceException.getMessage());
}
}

@ -198,6 +198,47 @@ public class ProcessDefinitionServiceTest {
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
}
@Test
public void testCopyProcessDefinition() throws Exception{
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> result = new HashMap<>(5);
//project check auth success, instance not exist
putMsg(result, Status.SUCCESS, projectName);
Mockito.when(projectService.checkProjectAndAuth(loginUser,project,projectName)).thenReturn(result);
ProcessDefinition definition = getProcessDefinition();
definition.setLocations("{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}");
definition.setProcessDefinitionJson("{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\",\"name\":\"ssh_test1\",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234\\\"\\necho ${aa}\"},\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\",\"retryInterval\":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},\"taskInstancePriority\":\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}");
definition.setConnects("[]");
//instance exists
Mockito.when(processDefineMapper.selectById(46)).thenReturn(definition);
Map<String, Object> createProcessResult = new HashMap<>(5);
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.createProcessDefinition(
loginUser,
definition.getProjectName(),
definition.getName(),
definition.getProcessDefinitionJson(),
definition.getDescription(),
definition.getLocations(),
definition.getConnects())).thenReturn(createProcessResult);
Map<String, Object> successRes = processDefinitionService.copyProcessDefinition(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
}
@Test
public void deleteProcessDefinitionByIdTest() throws Exception {
String projectName = "project_test1";

@ -770,12 +811,14 @@ public class ProcessDefinitionServiceTest {
* @return ProcessDefinition
*/
private ProcessDefinition getProcessDefinition(){
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setId(46);
processDefinition.setName("test_pdf");
processDefinition.setProjectId(2);
processDefinition.setTenantId(1);
processDefinition.setDescription("");
return processDefinition;
}
@ -19,12 +19,16 @@ package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;

@ -37,7 +41,6 @@ import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.omg.CORBA.Any;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;

@ -172,10 +175,29 @@ public class ResourcesServiceTest {
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
user.setId(1);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsFileName(Mockito.any(), Mockito.any(),Mockito.anyString())).thenReturn("test1");
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF);
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
user.setId(1);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(true);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());

@ -199,21 +221,16 @@ public class ResourcesServiceTest {
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test1");
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
e.printStackTrace();
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test")
try {
PowerMockito.when(HadoopUtils.getInstance().copy(Mockito.anyString(),Mockito.anyString(),true,true)).thenReturn(true);
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());

@ -503,6 +503,9 @@ public class HadoopUtils implements Closeable {
* @return hdfs file name
*/
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/","");
}
return String.format("%s/%s", getHdfsDir(resourceType,tenantCode), fileName);
}

@ -514,6 +517,9 @@ public class HadoopUtils implements Closeable {
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsResourceFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/","");
}
return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
}

@ -525,6 +531,9 @@ public class HadoopUtils implements Closeable {
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/","");
}
return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
}

@ -127,6 +127,18 @@ public class HadoopUtilsTest {
Assert.assertEquals("/dolphinscheduler/11000/resources/aa.txt", result);
}
@Test
public void getHdfsResourceFileName() {
String result = hadoopUtils.getHdfsResourceFileName("11000","aa.txt");
Assert.assertEquals("/dolphinscheduler/11000/resources/aa.txt", result);
}
@Test
public void getHdfsUdfFileName() {
String result = hadoopUtils.getHdfsFileName(ResourceType.UDF,"11000","aa.txt");
Assert.assertEquals("/dolphinscheduler/11000/udfs/aa.txt", result);
}
@Test
public void isYarnEnabled() {
boolean result = hadoopUtils.isYarnEnabled();

@ -91,6 +91,16 @@ public class Host implements Serializable {
return host;
}
/**
* whether old version
* @param address address
* @return true if the address is an old-version one (no port), otherwise false
*/
public static Boolean isOldVersion(String address){
String[] parts = address.split(":");
return parts.length != 2;
}
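
A quick illustration of the two address formats this method distinguishes (addresses are made up; the Host import path is assumed from this module):

import org.apache.dolphinscheduler.remote.utils.Host; // package assumed

public class HostVersionExample {
    public static void main(String[] args) {
        // older registrations stored only the ip, newer ones store ip:port
        System.out.println(Host.isOldVersion("192.168.1.10"));       // true  -> use the raw address
        System.out.println(Host.isOldVersion("192.168.1.10:5678"));  // false -> parse with Host.of(...)
    }
}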
@Override
public boolean equals(Object o) {
if (this == o) {

@ -117,9 +117,12 @@ public class MasterServer {
this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_RESPONSE, new TaskKillResponseProcessor());
this.nettyRemotingServer.start();
//
this.zkMasterClient.start();
// register
this.masterRegistry.registry();
// self tolerant
this.zkMasterClient.start();
//
masterSchedulerService.start();

@ -48,13 +48,13 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.HashSet;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.dolphinscheduler.common.Constants.*;
import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS;

/**
* TaskUpdateQueue consumer

@ -328,36 +328,38 @@ public class TaskPriorityQueueConsumer extends Thread{
return false;
}
/**
* create project resource files
* get resource full name list
*/
private List<String> getResourceFullNames(TaskNode taskNode){
Set<Integer> resourceIdsSet = new HashSet<>();
private List<String> getResourceFullNames(TaskNode taskNode) {
List<String> resourceFullNameList = new ArrayList<>();
AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams());
if (baseParam != null) {
List<ResourceInfo> projectResourceFiles = baseParam.getResourceFilesList();
if (projectResourceFiles != null) {
Stream<Integer> resourceInfotream = projectResourceFiles.stream().map(resourceInfo -> resourceInfo.getId());
resourceIdsSet.addAll(resourceInfotream.collect(Collectors.toSet()));
// filter the resources that the resource id equals 0
Set<ResourceInfo> oldVersionResources = projectResourceFiles.stream().filter(t -> t.getId() == 0).collect(Collectors.toSet());
if (CollectionUtils.isNotEmpty(oldVersionResources)) {
resourceFullNameList.addAll(oldVersionResources.stream().map(resource -> resource.getRes()).collect(Collectors.toSet()));
}
// get the resource id in order to get the resource names in batch
Stream<Integer> resourceIdStream = projectResourceFiles.stream().map(resourceInfo -> resourceInfo.getId());
Set<Integer> resourceIdsSet = resourceIdStream.collect(Collectors.toSet());
if (CollectionUtils.isNotEmpty(resourceIdsSet)) {
Integer[] resourceIds = resourceIdsSet.toArray(new Integer[resourceIdsSet.size()]);
List<Resource> resources = processService.listResourceByIds(resourceIds);
resourceFullNameList.addAll(resources.stream()
.map(resourceInfo -> resourceInfo.getFullName())
.collect(Collectors.toList()));
}
}
}
if (CollectionUtils.isEmpty(resourceIdsSet)){
return null;
}
Integer[] resourceIds = resourceIdsSet.toArray(new Integer[resourceIdsSet.size()]);
List<Resource> resources = processService.listResourceByIds(resourceIds);
List<String> resourceFullNames = resources.stream()
.map(resourceInfo -> resourceInfo.getFullName())
.collect(Collectors.toList());
return resourceFullNames;
return resourceFullNameList;
}
}
|
||||
|
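Two behavioral changes fall out of this refactor: the method now always returns a (possibly empty) list instead of null, and resources created before resource ids existed (id == 0) are kept by their stored name. A condensed sketch of the new logic, assuming the same collaborators as the diff; note the committed code batches all ids, including 0, so this sketch is only illustrative:

    // Condensed, illustrative sketch of the refactored method (not a line-for-line copy).
    private List<String> getResourceFullNames(TaskNode taskNode) {
        List<String> fullNames = new ArrayList<>();
        AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams());
        if (params == null || params.getResourceFilesList() == null) {
            return fullNames; // always non-null, unlike the old version
        }
        List<ResourceInfo> files = params.getResourceFilesList();
        // old-style resources (id == 0) carry their full name directly
        files.stream().filter(f -> f.getId() == 0)
             .map(ResourceInfo::getRes)
             .forEach(fullNames::add);
        // the rest are resolved by id in a single batch query
        Set<Integer> ids = files.stream().map(ResourceInfo::getId).collect(Collectors.toSet());
        if (CollectionUtils.isNotEmpty(ids)) {
            for (Resource r : processService.listResourceByIds(ids.toArray(new Integer[0]))) {
                fullNames.add(r.getFullName());
            }
        }
        return fullNames;
    }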
@@ -16,7 +16,6 @@
 */
package org.apache.dolphinscheduler.server.worker.task.sql;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
@@ -24,7 +23,6 @@ import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.alert.utils.MailUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
@@ -37,7 +35,6 @@ import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
@@ -78,6 +75,10 @@ public class SqlTask extends AbstractTask {
     */
    private TaskExecutionContext taskExecutionContext;

    /**
     * default query sql limit
     */
    private static final int LIMIT = 10000;

    public SqlTask(TaskExecutionContext taskExecutionContext, Logger logger) {
        super(taskExecutionContext, logger);
@@ -257,12 +258,15 @@ public class SqlTask extends AbstractTask {
        ResultSetMetaData md = resultSet.getMetaData();
        int num = md.getColumnCount();

        while (resultSet.next()) {
        int rowCount = 0;

        while (rowCount < LIMIT && resultSet.next()) {
            JSONObject mapOfColValues = new JSONObject(true);
            for (int i = 1; i <= num; i++) {
                mapOfColValues.put(md.getColumnName(i), resultSet.getObject(i));
            }
            resultJSONArray.add(mapOfColValues);
            rowCount++;
        }
        logger.debug("execute sql : {}", JSONObject.toJSONString(resultJSONArray, SerializerFeature.WriteMapNullValue));
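The point of the new LIMIT constant is memory safety: an unbounded SELECT used to be serialized row by row into resultJSONArray, so a large result set could exhaust the worker's heap. The same bounded-read pattern over plain JDBC, as a self-contained sketch (names are illustrative, not from the commit):

    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.SQLException;
    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class BoundedResultReader {
        // Reads at most 'limit' rows, mirroring the rowCount < LIMIT guard above.
        static List<Map<String, Object>> readRows(ResultSet rs, int limit) throws SQLException {
            ResultSetMetaData md = rs.getMetaData();
            int cols = md.getColumnCount();
            List<Map<String, Object>> rows = new ArrayList<>();
            int count = 0;
            while (count < limit && rs.next()) {
                Map<String, Object> row = new LinkedHashMap<>(); // keeps column order, like JSONObject(true)
                for (int i = 1; i <= cols; i++) {
                    row.put(md.getColumnName(i), rs.getObject(i));
                }
                rows.add(row);
                count++;
            }
            return rows;
        }
    }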
@@ -24,6 +24,7 @@ import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
@@ -40,6 +41,8 @@ import org.springframework.stereotype.Component;
import java.util.Date;
import java.util.List;

import static org.apache.dolphinscheduler.common.Constants.*;


/**
 * zookeeper master client
@@ -72,8 +75,13 @@ public class ZKMasterClient extends AbstractZKClient {
        // init system znode
        this.initSystemZNode();

        // check if fault tolerance is required?failure and tolerance
        if (getActiveMasterNum() == 1 && checkZKNodeExists(OSUtils.getHost(), ZKNodeType.MASTER)) {
        while (!checkZKNodeExists(OSUtils.getHost(), ZKNodeType.MASTER)){
            ThreadUtils.sleep(SLEEP_TIME_MILLIS);
        }


        // self tolerant
        if (getActiveMasterNum() == 1) {
            failoverWorker(null, true);
            failoverMaster(null);
        }
@@ -147,7 +155,7 @@ public class ZKMasterClient extends AbstractZKClient {
     * @throws Exception exception
     */
    private void failoverServerWhenDown(String serverHost, ZKNodeType zkNodeType) throws Exception {
        if(StringUtils.isEmpty(serverHost)){
        if(StringUtils.isEmpty(serverHost) || serverHost.startsWith(OSUtils.getHost())){
            return ;
        }
        switch (zkNodeType){
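The added serverHost.startsWith(OSUtils.getHost()) condition makes a master skip failover for itself: a dead-server event whose path begins with the local host refers to this machine's own registration, not a peer whose tasks need taking over. The guard in isolation, as a sketch with hypothetical names:

    // Sketch of the self-failover guard; host strings are assumed to look like "192.168.1.10:5678".
    static boolean shouldFailover(String deadServerHost, String localHost) {
        // skip empty events, and never fail over this node's own registration
        if (deadServerHost == null || deadServerHost.isEmpty()) {
            return false;
        }
        return !deadServerHost.startsWith(localHost);
    }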
@@ -133,8 +133,6 @@ public class TaskCallbackServiceTest {
        nettyRemotingClient.close();
    }



    @Test(expected = IllegalArgumentException.class)
    public void testSendAckWithIllegalArgumentException(){
        TaskExecuteAckCommand ackCommand = Mockito.mock(TaskExecuteAckCommand.class);
@@ -178,39 +176,40 @@ public class TaskCallbackServiceTest {
        }
    }

    @Test(expected = IllegalStateException.class)
    public void testSendAckWithIllegalStateException2(){
        masterRegistry.registry();
        final NettyServerConfig serverConfig = new NettyServerConfig();
        serverConfig.setListenPort(30000);
        NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(serverConfig);
        nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor);
        nettyRemotingServer.start();
//    @Test(expected = IllegalStateException.class)
//    public void testSendAckWithIllegalStateException2(){
//        masterRegistry.registry();
//        final NettyServerConfig serverConfig = new NettyServerConfig();
//        serverConfig.setListenPort(30000);
//        NettyRemotingServer nettyRemotingServer = new NettyRemotingServer(serverConfig);
//        nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, taskAckProcessor);
//        nettyRemotingServer.start();
//
//        final NettyClientConfig clientConfig = new NettyClientConfig();
//        NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig);
//        Channel channel = nettyRemotingClient.getChannel(Host.of("localhost:30000"));
//        taskCallbackService.addRemoteChannel(1, new NettyRemoteChannel(channel, 1));
//        channel.close();
//        TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
//        ackCommand.setTaskInstanceId(1);
//        ackCommand.setStartTime(new Date());
//
//        nettyRemotingServer.close();
//
//        taskCallbackService.sendAck(1, ackCommand.convert2Command());
//        try {
//            Thread.sleep(5000);
//        } catch (InterruptedException e) {
//            e.printStackTrace();
//        }
//
//        Stopper.stop();
//
//        try {
//            Thread.sleep(5000);
//        } catch (InterruptedException e) {
//            e.printStackTrace();
//        }
//    }

        final NettyClientConfig clientConfig = new NettyClientConfig();
        NettyRemotingClient nettyRemotingClient = new NettyRemotingClient(clientConfig);
        Channel channel = nettyRemotingClient.getChannel(Host.of("localhost:30000"));
        taskCallbackService.addRemoteChannel(1, new NettyRemoteChannel(channel, 1));
        channel.close();
        TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand();
        ackCommand.setTaskInstanceId(1);
        ackCommand.setStartTime(new Date());

        nettyRemotingServer.close();

        taskCallbackService.sendAck(1, ackCommand.convert2Command());
        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        Stopper.stop();

        try {
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
@@ -119,6 +119,10 @@ public class ProcessService {
            logger.info("there is not enough thread for this command: {}", command);
            return setWaitingThreadProcess(command, processInstance);
        }
        if (processInstance.getCommandType().equals(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS)){
            delCommandByid(command.getId());
            return null;
        }
        processInstance.setCommandType(command.getCommandType());
        processInstance.addHistoryCmd(command.getCommandType());
        saveProcessInstance(processInstance);
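The added branch appears to guard against re-running fault-tolerance recovery: when the fetched process instance was already produced by a RECOVER_TOLERANCE_FAULT_PROCESS command, the incoming command is consumed (delCommandByid) and null is returned so no duplicate instance is scheduled. A minimal sketch of the guard, reusing the names from the diff (for enums, == reads a little more directly than equals):

    // Minimal sketch of the new guard; CommandType and delCommandByid as in the diff.
    if (processInstance.getCommandType() == CommandType.RECOVER_TOLERANCE_FAULT_PROCESS) {
        delCommandByid(command.getId()); // consume the command so it is not retried
        return null;                     // nothing to run: recovery already handled this instance
    }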
@@ -48,7 +48,7 @@
      <m-list-box>
        <div slot="text">{{$t('Main jar package')}}</div>
        <div slot="content">
          <treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :placeholder="$t('Please enter main jar package')">
          <treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :disabled="isDetails" :placeholder="$t('Please enter main jar package')">
            <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
          </treeselect>
        </div>
@@ -557,4 +557,12 @@
      }
    }
  }
  .vue-treeselect--disabled {
    .vue-treeselect__control {
      background-color: #ecf3f8;
      .vue-treeselect__single-value {
        color: #6d859e;
      }
    }
  }
</style>
@@ -44,7 +44,7 @@
      <m-list-box>
        <div slot="text">{{$t('Main jar package')}}</div>
        <div slot="content">
          <treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :value-consists-of="valueConsistsOf" :placeholder="$t('Please enter main jar package')">
          <treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :value-consists-of="valueConsistsOf" :disabled="isDetails" :placeholder="$t('Please enter main jar package')">
            <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
          </treeselect>
        </div>
@@ -427,4 +427,12 @@
      }
    }
  }
  .vue-treeselect--disabled {
    .vue-treeselect__control {
      background-color: #ecf3f8;
      .vue-treeselect__single-value {
        color: #6d859e;
      }
    }
  }
</style>
@@ -333,3 +333,13 @@
    components: { mLocalParams, mListBox, mResources,Treeselect }
  }
</script>
<style lang="scss" rel="stylesheet/scss" scope>
  .vue-treeselect--disabled {
    .vue-treeselect__control {
      background-color: #ecf3f8;
      .vue-treeselect__single-value {
        color: #6d859e;
      }
    }
  }
</style>
@@ -396,5 +396,12 @@
    right: -12px;
    top: -16px;
  }

  .vue-treeselect--disabled {
    .vue-treeselect__control {
      background-color: #ecf3f8;
      .vue-treeselect__single-value {
        color: #6d859e;
      }
    }
  }
</style>
@@ -63,7 +63,7 @@
      <m-list-box>
        <div slot="text">{{$t('Main jar package')}}</div>
        <div slot="content">
          <treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :placeholder="$t('Please enter main jar package')">
          <treeselect v-model="mainJar" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :disabled="isDetails" :placeholder="$t('Please enter main jar package')">
            <div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
          </treeselect>
        </div>
@@ -606,4 +606,12 @@
      }
    }
  }
  .vue-treeselect--disabled {
    .vue-treeselect__control {
      background-color: #ecf3f8;
      .vue-treeselect__single-value {
        color: #6d859e;
      }
    }
  }
</style>
@@ -37,7 +37,7 @@
      </div>
      <div v-if="sqlType==0" style="display: inline-block;padding-left: 10px;margin-top: 2px;">
        <x-checkbox-group v-model="showType">
          <x-checkbox :label="'TABLE'" :disabled="isDetails">{{$t('Table')}}</x-checkbox>
          <x-checkbox :label="'TABLE'" :disabled="isDetails">{{$t('TableMode')}}</x-checkbox>
          <x-checkbox :label="'ATTACHMENT'" :disabled="isDetails">{{$t('Attachment')}}</x-checkbox>
        </x-checkbox-group>
      </div>
@@ -43,7 +43,7 @@
    props: {},
    methods: {
      ...mapMutations('dag', ['setIsDetails', 'resetParams']),
      ...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getInstancedetail']),
      ...mapActions('dag', ['getProcessList','getProjectList', 'getResourcesList', 'getInstancedetail','getResourcesListJar']),
      ...mapActions('security', ['getTenantList','getWorkerGroupsAll']),
      /**
       * init
@@ -62,6 +62,8 @@
        this.getProjectList(),
        // get resources
        this.getResourcesList(),
        // get jar
        this.getResourcesListJar(),
        // get worker group list
        this.getWorkerGroupsAll(),
        this.getTenantList()
@@ -46,7 +46,7 @@
        <th scope="col" width="90">
          <span>{{$t('Timing state')}}</span>
        </th>
        <th scope="col" width="240">
        <th scope="col" width="300">
          <span>{{$t('Operation')}}</span>
        </th>
      </tr>
@@ -90,6 +90,7 @@
        <x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Timing')" @click="_timing(item)" :disabled="item.releaseState !== 'ONLINE' || item.scheduleReleaseState !== null" icon="ans-icon-timer"><!--{{$t('Timing')}}--></x-button>
        <x-button type="warning" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('online')" @click="_poponline(item)" v-if="item.releaseState === 'OFFLINE'" icon="ans-icon-upward"><!--{{$t('Offline')}}--></x-button>
        <x-button type="error" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('offline')" @click="_downline(item)" v-if="item.releaseState === 'ONLINE'" icon="ans-icon-downward"><!--{{$t('Online')}}--></x-button>
        <x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Copy')" @click="_copyProcess(item)" :disabled="item.releaseState === 'ONLINE'" icon="ans-icon-copy"><!--{{$t('Copy')}}--></x-button>
        <x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Cron Manage')" @click="_timingManage(item)" :disabled="item.releaseState !== 'ONLINE'" icon="ans-icon-datetime"><!--{{$t('Cron Manage')}}--></x-button>
        <x-poptip
          :ref="'poptip-delete-' + $index"
@@ -158,7 +159,7 @@
      pageSize: Number
    },
    methods: {
      ...mapActions('dag', ['editProcessState', 'getStartCheck', 'getReceiver', 'deleteDefinition', 'batchDeleteDefinition','exportDefinition']),
      ...mapActions('dag', ['editProcessState', 'getStartCheck', 'getReceiver', 'deleteDefinition', 'batchDeleteDefinition','exportDefinition','copyProcess']),
      _rtPublishStatus (code) {
        return _.filter(publishStatus, v => v.code === code)[0].desc
      },
@@ -306,6 +307,21 @@
          releaseState: 1
        })
      },
      /**
       * copy
       */
      _copyProcess (item) {
        this.copyProcess({
          processId: item.id
        }).then(res => {
          this.$message.success(res.msg)
          $('body').find('.tooltip.fade.top.in').remove()
          this._onUpdate()
        }).catch(e => {
          this.$message.error(e.msg || '')
        })
      },

      _export (item) {
        this.exportDefinition({
          processDefinitionId: item.id,
@@ -193,6 +193,7 @@
        runMode: 'RUN_MODE_SERIAL',
        processInstancePriority: 'MEDIUM',
        workerGroup: 'default'
      }
    },
    props: {
@@ -277,6 +278,18 @@
      this.workflowName = this.item.name

      this._getReceiver()
      let stateWorkerGroupsList = this.store.state.security.workerGroupsListAll || []
      if (stateWorkerGroupsList.length) {
        this.workerGroup = stateWorkerGroupsList[0].id
      } else {
        this.store.dispatch('security/getWorkerGroupsAll').then(res => {
          this.$nextTick(() => {
            if(res.length>0) {
              this.workerGroup = res[0].id
            }
          })
        })
      }
    },
    mounted () {
      this._getNotifyGroupList().then(() => {
Some files were not shown because too many files have changed in this diff.