Mirror of https://gitee.com/dolphinscheduler/DolphinScheduler.git (synced 2024-11-30 19:27:38 +08:00)

Commit 91e563daeb — Merge branch 'dev' of https://github.com/apache/incubator-dolphinscheduler into dev

.github/ISSUE_TEMPLATE/bug_report.md (vendored, 1 change)
@@ -2,7 +2,6 @@
name: Bug report
about: Create a report to help us improve
title: "[Bug][Module Name] Bug title "
labels: bug
assignees: ''

---

.github/ISSUE_TEMPLATE/feature_request.md (vendored, 1 change)
@@ -2,7 +2,6 @@
name: Feature request
about: Suggest an idea for this project
title: "[Feature][Module Name] Feature title"
labels: new feature
assignees: ''

---

@@ -2,7 +2,6 @@
name: Improvement suggestion
about: Improvement suggestion for this project
title: "[Improvement][Module Name] Improvement title"
labels: improvement
assignees: ''

---

.github/ISSUE_TEMPLATE/question.md (vendored, 1 change)
@@ -2,7 +2,6 @@
name: Question
about: Have a question wanted to be help
title: "[Question] Question title"
labels: question
assignees: ''

---

.github/workflows/ci_backend.yml (vendored, 2 changes)
@@ -49,7 +49,7 @@ jobs:
with:
submodule: true
- name: Check License Header
uses: apache/skywalking-eyes@9bd5feb
uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:

.github/workflows/ci_e2e.yml (vendored, 2 changes)
@@ -33,7 +33,7 @@ jobs:
with:
submodule: true
- name: Check License Header
uses: apache/skywalking-eyes@9bd5feb
uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82
- uses: actions/cache@v1
with:
path: ~/.m2/repository

.github/workflows/ci_ut.yml (vendored, 2 changes)
@@ -36,7 +36,7 @@ jobs:
with:
submodule: true
- name: Check License Header
uses: apache/skywalking-eyes@9bd5feb
uses: apache/skywalking-eyes@ec88b7d850018c8983f87729ea88549e100c5c82
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Only enable review / suggestion here
- uses: actions/cache@v1

.gitignore (vendored, 4 changes)
@@ -7,6 +7,10 @@
.target
.idea/
target/
dist/
all-dependencies.txt
self-modules.txt
third-party-dependencies.txt
.settings
.nbproject
.classpath

README.md (54 changes)
@@ -7,46 +7,44 @@ Dolphin Scheduler Official Website
[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apache-dolphinscheduler&metric=alert_status)](https://sonarcloud.io/dashboard?id=apache-dolphinscheduler)

> Dolphin Scheduler for Big Data

[![Stargazers over time](https://starchart.cc/apache/incubator-dolphinscheduler.svg)](https://starchart.cc/apache/incubator-dolphinscheduler)

[![EN doc](https://img.shields.io/badge/document-English-blue.svg)](README.md)
[![CN doc](https://img.shields.io/badge/文档-中文版-blue.svg)](README_zh_CN.md)

### Design features:
### Design Features:

Dolphin Scheduler is a distributed and easy-to-extend visual DAG workflow scheduling system. It dedicates to solving the complex dependencies in data processing to make the scheduling system `out of the box` for the data processing process.
DolphinScheduler is a distributed and extensible workflow scheduler platform with powerful DAG visual interfaces, dedicated to solving complex job dependencies in the data pipeline and providing various types of jobs available `out of the box`.

Its main objectives are as follows:

- Associate the tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of the task in real-time.
- Support many task types: Shell, MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Sub_Process, Procedure, etc.
- Support process scheduling, dependency scheduling, manual scheduling, manual pause/stop/recovery, support for failed retry/alarm, recovery from specified nodes, Kill task, etc.
- Support the priority of process & task, task failover, and task timeout alarm or failure.
- Support process global parameters and node custom parameter settings.
- Support online upload/download of resource files, management, etc. Support online file creation and editing.
- Support task log online viewing and scrolling, online download log, etc.
- Implement cluster HA, decentralize Master cluster and Worker cluster through Zookeeper.
- Support various task types: Shell, MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Sub_Process, Procedure, etc.
- Support scheduling of workflows and dependencies, manual scheduling to pause/stop/recover task, support failure task retry/alarm, recover specified nodes from failure, kill task, etc.
- Support the priority of workflows & tasks, task failover, and task timeout alarm or failure.
- Support workflow global parameters and node customized parameter settings.
- Support online upload/download/management of resource files, etc. Support online file creation and editing.
- Support task log online viewing and scrolling and downloading, etc.
- Have implemented cluster HA, decentralize Master cluster and Worker cluster through Zookeeper.
- Support the viewing of Master/Worker CPU load, memory, and CPU usage metrics.
- Support presenting tree or Gantt chart of workflow history as well as the statistics results of task & process status in each workflow.
- Support backfilling data.
- Support displaying workflow history in tree/Gantt chart, as well as statistical analysis on the task status & process status in each workflow.
- Support back-filling data.
- Support multi-tenant.
- Support internationalization.
- There are more waiting for partners to explore...
- More features waiting for partners to explore...

### What's in Dolphin Scheduler
### What's in DolphinScheduler

Stability | Easy to use | Features | Scalability |
-- | -- | -- | --
Decentralized multi-master and multi-worker | Visualization process defines key information such as task status, task type, retry times, task running machine, visual variables, and so on at a glance. | Support pause, recover operation | Support custom task types
HA is supported by itself | All process definition operations are visualized, dragging tasks to draw DAGs, configuring data sources and resources. At the same time, for third-party systems, the API mode operation is provided. | Users on Dolphin Scheduler can achieve many-to-one or one-to-one mapping relationship through tenants and Hadoop users, which is very important for scheduling large data jobs. | The scheduler uses distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster. Master and Worker support dynamic online and offline.
Overload processing: Overload processing: By using the task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured. Machine jam can be avoided with high tolerance to numbers of tasks cached in task queue. | One-click deployment | Support traditional shell tasks, and big data platform task scheduling: MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Procedure, Sub_Process | |
Decentralized multi-master and multi-worker | Visualization of workflow key information, such as task status, task type, retry times, task operation machine information, visual variables, and so on at a glance. | Support pause, recover operation | Support customized task types
support HA | Visualization of all workflow operations, dragging tasks to draw DAGs, configuring data sources and resources. At the same time, for third-party systems, provide API mode operations. | Users on DolphinScheduler can achieve many-to-one or one-to-one mapping relationship through tenants and Hadoop users, which is very important for scheduling large data jobs. | The scheduler supports distributed scheduling, and the overall scheduling capability will increase linearly with the scale of the cluster. Master and Worker support dynamic adjustment.
Overload processing: By using the task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured. Machine jam can be avoided with high tolerance to numbers of tasks cached in task queue. | One-click deployment | Support traditional shell tasks, and big data platform task scheduling: MR, Spark, SQL (MySQL, PostgreSQL, hive, spark SQL), Python, Procedure, Sub_Process | |

### System partial screenshot
### User Interface Screenshots

![home page](https://user-images.githubusercontent.com/15833811/75218288-bf286400-57d4-11ea-8263-d639c6511d5f.jpg)
![dag](https://user-images.githubusercontent.com/15833811/75236750-3374fe80-57f9-11ea-857d-62a66a5a559d.png)
@@ -57,13 +55,9 @@ Overload processing: Overload processing: By using the task queue mechanism, the
![security](https://user-images.githubusercontent.com/15833811/75236441-bfd2f180-57f8-11ea-88bd-f24311e01b7e.png)
![treeview](https://user-images.githubusercontent.com/15833811/75217191-3fe56100-57d1-11ea-8856-f19180d9a879.png)

### QuickStart in Docker
Please referer the official website document:[[QuickStart in Docker](https://dolphinscheduler.apache.org/en-us/docs/1.3.4/user_doc/docker-deployment.html)]

### Recent R&D plan
The work plan of Dolphin Scheduler: [R&D plan](https://github.com/apache/incubator-dolphinscheduler/projects/1), which `In Develop` card shows the features that are currently being developed and TODO card lists what needs to be done(including feature ideas).

### How to contribute

Welcome to participate in contributing, please refer to this website to find out more: [[How to contribute](https://dolphinscheduler.apache.org/en-us/docs/development/contribute.html)]

### How to Build

@@ -80,14 +74,16 @@ dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-${latest.release
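The hunk above only references the build artifact under dolphinscheduler-dist/target. A typical Maven release build that produces that tarball is sketched below; the exact profile and flags are assumptions and should be verified against the full "How to Build" section of the README.

```bash
# Assumed invocation (profile/flags may differ); run from the repository root.
mvn -U clean package -Prelease -Dmaven.test.skip=true
# The binary distribution referenced above should then exist here:
ls dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-*-dolphinscheduler-bin.tar.gz
```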
### Thanks

Dolphin Scheduler is based on a lot of excellent open-source projects, such as google guava, guice, grpc, netty, ali bonecp, quartz, and many open-source projects of Apache and so on.
We would like to express our deep gratitude to all the open-source projects which contribute to making the dream of Dolphin Scheduler comes true. We hope that we are not only the beneficiaries of open-source, but also give back to the community. Besides, we expect the partners who have the same passion and conviction to open-source will join in and contribute to the open-source community!

DolphinScheduler is based on a lot of excellent open-source projects, such as google guava, guice, grpc, netty, ali bonecp, quartz, and many open-source projects of Apache and so on.
We would like to express our deep gratitude to all the open-source projects used in Dolphin Scheduler. We hope that we are not only the beneficiaries of open-source, but also give back to the community. Besides, we hope everyone who have the same enthusiasm and passion for open source could join in and contribute to the open-source community!

### Get Help
1. Submit an issue
1. Submit an [[issue](https://github.com/apache/incubator-dolphinscheduler/issues/new/choose)]
1. Subscribe to the mail list: https://dolphinscheduler.apache.org/en-us/docs/development/subscribe.html, then email dev@dolphinscheduler.apache.org

### How to Contribute
The community welcomes everyone to participate in contributing, please refer to this website to find out more: [[How to contribute](https://dolphinscheduler.apache.org/en-us/community/development/contribute.html)]

### License
Please refer to the [LICENSE](https://github.com/apache/incubator-dolphinscheduler/blob/dev/LICENSE) file.

@ -15,53 +15,64 @@
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<configuration>
|
||||
<property>
|
||||
<name>worker.exec.threads</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>worker execute thread num</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.heartbeat.interval</name>
|
||||
<value>10</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>worker heartbeat interval</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.max.cpuload.avg</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.reserved.memory</name>
|
||||
<value>0.3</value>
|
||||
<description>only larger than reserved memory, worker server can work. default value : physical memory * 1/10, unit is G.</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>worker.listen.port</name>
|
||||
<value>1234</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>worker listen port</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.groups</name>
|
||||
<value>default</value>
|
||||
<description>default worker group</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.exec.threads</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>worker execute thread num</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.heartbeat.interval</name>
|
||||
<value>10</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>worker heartbeat interval</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.max.cpuload.avg</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.reserved.memory</name>
|
||||
<value>0.3</value>
|
||||
<description>only larger than reserved memory, worker server can work. default value : physical memory * 1/10,
|
||||
unit is G.
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.listen.port</name>
|
||||
<value>1234</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>worker listen port</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.groups</name>
|
||||
<value>default</value>
|
||||
<description>default worker group</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.weigth</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>worker weight</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
</configuration>
|
@@ -15,55 +15,37 @@
# limitations under the License.
#

FROM nginx:alpine
FROM openjdk:8-jdk-alpine

ARG VERSION

ENV TZ Asia/Shanghai
ENV LANG C.UTF-8
ENV DEBIAN_FRONTEND noninteractive
ENV DOCKER true

#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip tini kazoo.
#If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example:
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
# 1. install command/library/software
# If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example:
# RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
# RUN sed -i 's/dl-cdn.alpinelinux.org/mirror.tuna.tsinghua.edu.cn/g' /etc/apk/repositories
RUN apk update && \
apk add --update --no-cache dos2unix shadow bash openrc python2 python3 sudo vim wget iputils net-tools openssh-server py-pip tini && \
apk add --update --no-cache procps && \
openrc boot && \
pip install kazoo
apk add --no-cache tzdata dos2unix bash python2 python3 procps sudo shadow tini postgresql-client && \
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
apk del tzdata && \
rm -rf /var/cache/apk/*

#2. install jdk
RUN apk add --update --no-cache openjdk8
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $JAVA_HOME/bin:$PATH

#3. add dolphinscheduler
# 2. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
RUN ln -s /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin /opt/dolphinscheduler
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler

#4. install database, if use mysql as your backend database, the `mysql-client` package should be installed
RUN apk add --update --no-cache postgresql postgresql-contrib

#5. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
rm -rf /etc/nginx/conf.d/*
COPY ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d

#6. add configuration and modify permissions and set soft links
# 3. add configuration and modify permissions and set soft links
COPY ./checkpoint.sh /root/checkpoint.sh
COPY ./startup-init-conf.sh /root/startup-init-conf.sh
COPY ./startup.sh /root/startup.sh
COPY ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
COPY ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/
COPY conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
RUN chmod +x /root/checkpoint.sh && \
chmod +x /root/startup-init-conf.sh && \
chmod +x /root/startup.sh && \
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
chmod +x /opt/dolphinscheduler/script/*.sh && \
chmod +x /opt/dolphinscheduler/bin/*.sh && \
dos2unix /root/checkpoint.sh && \
COPY ./conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
RUN dos2unix /root/checkpoint.sh && \
dos2unix /root/startup-init-conf.sh && \
dos2unix /root/startup.sh && \
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
@@ -71,13 +53,10 @@ RUN chmod +x /root/checkpoint.sh && \
dos2unix /opt/dolphinscheduler/bin/*.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls && \
#7. remove apk index cache and disable coredup for sudo
rm -rf /var/cache/apk/* && \
mkdir -p /var/mail /tmp/xls && \
echo "Set disable_coredump false" >> /etc/sudo.conf

#8. expose port
EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888
# 4. expose port
EXPOSE 5678 1234 12345 50051

ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]

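The Dockerfile above takes a VERSION build argument and expects the binary tarball alongside it. A manual build roughly equivalent to what the docker/build/hooks/build script later in this commit runs would look like this (the version number is a placeholder):

```bash
# Run from the docker/build directory after copying in
# apache-dolphinscheduler-incubating-<VERSION>-dolphinscheduler-bin.tar.gz
docker build --build-arg VERSION=1.3.4 -t apache/dolphinscheduler:1.3.4 .
```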
@@ -15,9 +15,9 @@ Official Website: https://dolphinscheduler.apache.org

#### You can start a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
$ docker run -dit --name dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```

@@ -33,7 +33,7 @@ You can specify **existing postgres service**. Example:
$ docker run -dit --name dolphinscheduler \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```

@@ -43,7 +43,7 @@ You can specify **existing zookeeper service**. Example:
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```

@@ -90,14 +90,6 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler alert-server
```

* Start a **frontend**, For example:

```
$ docker run -dit --name dolphinscheduler \
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
-p 8888:8888 \
dolphinscheduler frontend
```

**Note**: You must be specify `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM` when start a standalone dolphinscheduler server.

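As one hedged illustration of that note, a standalone master-server could be started with the required variables set explicitly; the addresses and credentials below are placeholders in the same style as the examples above.

```bash
# Placeholder values; the image name and service argument follow the examples above.
docker run -dit --name dolphinscheduler-master \
  -e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
  -e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
  -e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
  dolphinscheduler master-server
```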
@@ -146,7 +138,7 @@ This environment variable sets the host for database. The default value is `127.

This environment variable sets the port for database. The default value is `5432`.

**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.
**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `master-server`, `worker-server`, `api-server`, `alert-server`.

**`DATABASE_USERNAME`**

@@ -306,18 +298,6 @@ This environment variable sets enterprise wechat agent id for `alert-server`. Th

This environment variable sets enterprise wechat users for `alert-server`. The default value is empty.

**`FRONTEND_API_SERVER_HOST`**

This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`.

**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `api-server`.

**`FRONTEND_API_SERVER_PORT`**

This environment variable sets api server port for `frontend`. The default value is `123451`.

**Note**: You must be specify it when start a standalone dolphinscheduler server. Like `api-server`.

## Initialization scripts

If you would like to do additional initialization in an image derived from this one, add one or more environment variable under `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`.
@@ -326,7 +306,7 @@ For example, to add an environment variable `API_SERVER_PORT` in `/root/start-in

```
export API_SERVER_PORT=5555
```
```

and to modify `/opt/dolphinscheduler/conf/application-api.properties.tpl` template file, add server port:
```
@@ -343,8 +323,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done

echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
```

@@ -15,9 +15,9 @@ Official Website: https://dolphinscheduler.apache.org

#### You can start a dolphinscheduler instance
```
$ docker run -dit --name dolphinscheduler \
$ docker run -dit --name dolphinscheduler \
-e DATABASE_USERNAME=test -e DATABASE_PASSWORD=test -e DATABASE_DATABASE=dolphinscheduler \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```

@@ -33,7 +33,7 @@ dolphinscheduler all
$ docker run -dit --name dolphinscheduler \
-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```

@@ -43,7 +43,7 @@ dolphinscheduler all
$ docker run -dit --name dolphinscheduler \
-e ZOOKEEPER_QUORUM="l92.168.x.x:2181"
-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" -e DATABASE_DATABASE="dolphinscheduler" \
-p 8888:8888 \
-p 12345:12345 \
dolphinscheduler all
```

@@ -90,15 +90,6 @@ $ docker run -dit --name dolphinscheduler \
dolphinscheduler alert-server
```

* Start a **frontend**, for example:

```
$ docker run -dit --name dolphinscheduler \
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
-p 8888:8888 \
dolphinscheduler frontend
```

**Note**: When you run only some of the dolphinscheduler services, you must specify the environment variables `DATABASE_HOST` `DATABASE_PORT` `DATABASE_DATABASE` `DATABASE_USERNAME` `DATABASE_PASSWORD` `ZOOKEEPER_QUORUM`.

## How to build a docker image
@@ -306,18 +297,6 @@ The Dolphin Scheduler image uses several environment variables that are easy to overlook. Although these

Sets the enterprise WeChat `USERS` for the `alert-server` mail/alert service. The default value is empty.

**`FRONTEND_API_SERVER_HOST`**

Sets the `api-server` address that the `frontend` connects to. The default value is `127.0.0.1`.

**Note**: When running the `api-server` on its own, you should specify this value.

**`FRONTEND_API_SERVER_PORT`**

Sets the `api-server` port that the `frontend` connects to. The default value is `12345`.

**Note**: When running the `api-server` on its own, you should specify this value.

## Initialization scripts

If you want to attach extra operations or add environment variables at build time or run time, modify the `/root/start-init-conf.sh` file; if configuration files are involved, change the corresponding template files under `/opt/dolphinscheduler/conf/*.tpl`.
@@ -326,7 +305,7 @@ The Dolphin Scheduler image uses several environment variables that are easy to overlook. Although these

```
export API_SERVER_PORT=5555
```
```

After adding the environment variable above, you should add the matching configuration to the template file `/opt/dolphinscheduler/conf/application-api.properties.tpl`:
```
@@ -343,8 +322,4 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done

echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
```

docker/build/checkpoint.sh (0 changes, Normal file → Executable file)
docker/build/conf/dolphinscheduler/env/dolphinscheduler_env.sh (vendored, 12 changes, Normal file → Executable file)
@@ -15,6 +15,14 @@
# limitations under the License.
#

export PYTHON_HOME=/usr/bin/python2
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
export PATH=$PYTHON_HOME/bin:$JAVA_HOME/bin:$PATH
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export DATAX_HOME=/opt/soft/datax/bin/datax.py

export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH

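A quick way to confirm that this environment file resolves inside a running container; the container name and paths are taken from the examples and Dockerfile in this commit, so treat this as a sketch.

```bash
# Assumes a container named "dolphinscheduler" is already running from the image above.
docker exec -it dolphinscheduler bash -c \
  'source /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && java -version && echo $PATH'
```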
@ -20,14 +20,6 @@
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-alert.log</file>
|
||||
@ -45,7 +37,6 @@
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
<appender-ref ref="ALERTLOGFILE"/>
|
||||
</root>
|
||||
|
||||
|
@ -20,14 +20,6 @@
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- api server logback config start -->
|
||||
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
@ -55,7 +47,6 @@
|
||||
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
<appender-ref ref="APILOGFILE"/>
|
||||
</root>
|
||||
|
||||
|
@ -20,14 +20,6 @@
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<conversionRule conversionWord="messsage"
|
||||
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
|
||||
@ -74,7 +66,6 @@
|
||||
<!-- master server logback config end -->
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
<appender-ref ref="TASKLOGFILE"/>
|
||||
<appender-ref ref="MASTERLOGFILE"/>
|
||||
</root>
|
||||
|
@ -20,14 +20,6 @@
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- worker server logback config start -->
|
||||
<conversionRule conversionWord="messsage"
|
||||
@ -75,7 +67,6 @@
|
||||
<!-- worker server logback config end -->
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
<appender-ref ref="TASKLOGFILE"/>
|
||||
<appender-ref ref="WORKERLOGFILE"/>
|
||||
</root>
|
||||
|
@ -1,51 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
server {
|
||||
listen 8888;
|
||||
server_name localhost;
|
||||
#charset koi8-r;
|
||||
#access_log /var/log/nginx/host.access.log main;
|
||||
location / {
|
||||
root /opt/dolphinscheduler/ui;
|
||||
index index.html index.html;
|
||||
}
|
||||
location /dolphinscheduler/ui{
|
||||
alias /opt/dolphinscheduler/ui;
|
||||
}
|
||||
location /dolphinscheduler {
|
||||
proxy_pass http://FRONTEND_API_SERVER_HOST:FRONTEND_API_SERVER_PORT;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header x_real_ipP $remote_addr;
|
||||
proxy_set_header remote_addr $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_http_version 1.1;
|
||||
proxy_connect_timeout 300s;
|
||||
proxy_read_timeout 300s;
|
||||
proxy_send_timeout 300s;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
}
|
||||
#error_page 404 /404.html;
|
||||
# redirect server error pages to the static page /50x.html
|
||||
#
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
|
@ -1,47 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# The number of milliseconds of each tick
|
||||
tickTime=2000
|
||||
# The number of ticks that the initial
|
||||
# synchronization phase can take
|
||||
initLimit=10
|
||||
# The number of ticks that can pass between
|
||||
# sending a request and getting an acknowledgement
|
||||
syncLimit=5
|
||||
# the directory where the snapshot is stored.
|
||||
# do not use /tmp for storage, /tmp here is just
|
||||
# example sakes.
|
||||
dataDir=/tmp/zookeeper
|
||||
# the port at which the clients will connect
|
||||
clientPort=2181
|
||||
# the maximum number of client connections.
|
||||
# increase this if you need to handle more clients
|
||||
#maxClientCnxns=60
|
||||
#
|
||||
# Be sure to read the maintenance section of the
|
||||
# administrator guide before turning on autopurge.
|
||||
#
|
||||
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
|
||||
#
|
||||
# The number of snapshots to retain in dataDir
|
||||
#autopurge.snapRetainCount=3
|
||||
# Purge task interval in hours
|
||||
# Set to "0" to disable auto purge feature
|
||||
#autopurge.purgeInterval=1
|
||||
#Four Letter Words commands:stat,ruok,conf,isro
|
||||
4lw.commands.whitelist=*
|
docker/build/hooks/build (9 changes, Normal file → Executable file)

@@ -48,7 +48,12 @@ echo -e "mv $(pwd)/dolphinscheduler-dist/target/apache-dolphinscheduler-incubati
mv "$(pwd)"/dolphinscheduler-dist/target/apache-dolphinscheduler-incubating-"${VERSION}"-dolphinscheduler-bin.tar.gz $(pwd)/docker/build/

# docker build
echo -e "docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/\n"
sudo docker build --build-arg VERSION="${VERSION}" -t $DOCKER_REPO:"${VERSION}" "$(pwd)/docker/build/"
BUILD_COMMAND="docker build --build-arg VERSION=${VERSION} -t $DOCKER_REPO:${VERSION} $(pwd)/docker/build/"
echo -e "$BUILD_COMMAND\n"
if (docker info 2> /dev/null | grep -i "ERROR"); then
sudo $BUILD_COMMAND
else
$BUILD_COMMAND
fi

echo "------ dolphinscheduler end - build -------"

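Assuming the hook reads DOCKER_REPO and VERSION from the environment (how they are normally supplied is not shown in this hunk), a local invocation from the repository root might look like this:

```bash
# Example values only
DOCKER_REPO=apache/dolphinscheduler VERSION=1.3.4 bash docker/build/hooks/build
```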
docker/build/hooks/push (0 changes, Normal file → Executable file)
docker/build/startup-init-conf.sh (18 changes, Normal file → Executable file)
@@ -39,9 +39,9 @@ export DATABASE_PARAMS=${DATABASE_PARAMS:-"characterEncoding=utf8"}
export DOLPHINSCHEDULER_ENV_PATH=${DOLPHINSCHEDULER_ENV_PATH:-"/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"}
export DOLPHINSCHEDULER_DATA_BASEDIR_PATH=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH:-"/tmp/dolphinscheduler"}
export DOLPHINSCHEDULER_OPTS=${DOLPHINSCHEDULER_OPTS:-""}
export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"NONE"}
export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/ds"}
export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"s3a://xxxx"}
export RESOURCE_STORAGE_TYPE=${RESOURCE_STORAGE_TYPE:-"HDFS"}
export RESOURCE_UPLOAD_PATH=${RESOURCE_UPLOAD_PATH:-"/dolphinscheduler"}
export FS_DEFAULT_FS=${FS_DEFAULT_FS:-"file:///"}
export FS_S3A_ENDPOINT=${FS_S3A_ENDPOINT:-"s3.xxx.amazonaws.com"}
export FS_S3A_ACCESS_KEY=${FS_S3A_ACCESS_KEY:-"xxxxxxx"}
export FS_S3A_SECRET_KEY=${FS_S3A_SECRET_KEY:-"xxxxxxx"}
@@ -81,7 +81,7 @@ export WORKER_WEIGHT=${WORKER_WEIGHT:-"100"}
#============================================================================
# alert plugin dir
export ALERT_PLUGIN_DIR=${ALERT_PLUGIN_DIR:-"/opt/dolphinscheduler"}
# XLS FILE
# xls file
export XLS_FILE_PATH=${XLS_FILE_PATH:-"/tmp/xls"}
# mail
export MAIL_SERVER_HOST=${MAIL_SERVER_HOST:-""}
@@ -99,12 +99,6 @@ export ENTERPRISE_WECHAT_SECRET=${ENTERPRISE_WECHAT_SECRET:-""}
export ENTERPRISE_WECHAT_AGENT_ID=${ENTERPRISE_WECHAT_AGENT_ID:-""}
export ENTERPRISE_WECHAT_USERS=${ENTERPRISE_WECHAT_USERS:-""}

#============================================================================
# Frontend
#============================================================================
export FRONTEND_API_SERVER_HOST=${FRONTEND_API_SERVER_HOST:-"127.0.0.1"}
export FRONTEND_API_SERVER_PORT=${FRONTEND_API_SERVER_PORT:-"12345"}

echo "generate app config"
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
eval "cat << EOF
@@ -112,7 +106,3 @@ $(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
EOF
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
done

echo "generate nginx config"
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
docker/build/startup.sh (55 changes, Normal file → Executable file)

@@ -22,8 +22,8 @@ DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin
DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script
DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs

# start database
initDatabase() {
# wait database
waitDatabase() {
echo "test ${DATABASE_TYPE} service"
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
@@ -43,19 +43,22 @@ initDatabase() {
exit 1
fi
else
v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
v=$(PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
if [ "$(echo ${v} | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
fi
}

# init database
initDatabase() {
echo "import sql data"
${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
}

# start zk
initZK() {
# wait zk
waitZK() {
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
@@ -70,12 +73,6 @@ initZK() {
done
}

# start nginx
initNginx() {
echo "start nginx"
nginx &
}

# start master-server
initMasterServer() {
echo "start master-server"
@@ -115,59 +112,54 @@ initAlertServer() {
printUsage() {
echo -e "Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system,"
echo -e "dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.\n"
echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server | frontend ]\n"
printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server, alert-server and frontend."
echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server ]\n"
printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server and alert-server"
printf "%-13s: %s\n" "master-server" "MasterServer is mainly responsible for DAG task split, task submission monitoring."
printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services.."
printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests from the front-end UI layer."
printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services."
printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests and providing the front-end UI layer."
printf "%-13s: %s\n" "alert-server" "AlertServer mainly include Alarms."
printf "%-13s: %s\n" "frontend" "Frontend mainly provides various visual operation interfaces of the system."
}

# init config file
source /root/startup-init-conf.sh

LOGFILE=/var/log/nginx/access.log
case "$1" in
(all)
initZK
waitZK
waitDatabase
initDatabase
initMasterServer
initWorkerServer
initApiServer
initAlertServer
initLoggerServer
initNginx
LOGFILE=/var/log/nginx/access.log
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(master-server)
initZK
initDatabase
waitZK
waitDatabase
initMasterServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log
;;
(worker-server)
initZK
initDatabase
waitZK
waitDatabase
initWorkerServer
initLoggerServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log
;;
(api-server)
initZK
waitZK
waitDatabase
initDatabase
initApiServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(alert-server)
initDatabase
waitDatabase
initAlertServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
;;
(frontend)
initNginx
LOGFILE=/var/log/nginx/access.log
;;
(help)
printUsage
exit 1
@@ -179,8 +171,7 @@ case "$1" in
esac

# init directories and log files
mkdir -p ${DOLPHINSCHEDULER_LOGS} && mkdir -p /var/log/nginx/ && cat /dev/null >> ${LOGFILE}
mkdir -p ${DOLPHINSCHEDULER_LOGS} && cat /dev/null >> ${LOGFILE}

echo "tail begin"
exec bash -c "tail -n 1 -f ${LOGFILE}"

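Given the (help) branch above and the tini/startup.sh entrypoint from the Dockerfile, the supported service arguments can be listed straight from a built image (image tag assumed):

```bash
# Prints the usage text from printUsage() and exits with status 1
docker run --rm apache/dolphinscheduler:latest help
```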
docker/docker-swarm/check (2 changes, Normal file → Executable file)

@@ -25,7 +25,7 @@ else
echo "Server start failed "$server_num
exit 1
fi
ready=`curl http://127.0.0.1:8888/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l`
ready=`curl http://127.0.0.1:12345/dolphinscheduler/login -d 'userName=admin&userPassword=dolphinscheduler123' -v | grep "login success" | wc -l`
if [ $ready -eq 1 ]
then
echo "Servers is ready"

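After the local stack is up, the check script above can be run directly to confirm the api-server answers the login request on port 12345; the compose file name is an assumption based on the docker-swarm directory shown above.

```bash
# Assumed layout: the compose definition lives next to the check script.
docker-compose -f docker/docker-swarm/docker-compose.yml up -d
bash docker/docker-swarm/check
```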
@ -31,6 +31,7 @@ services:
|
||||
volumes:
|
||||
- dolphinscheduler-postgresql:/bitnami/postgresql
|
||||
- dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
@ -45,13 +46,14 @@ services:
|
||||
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
|
||||
volumes:
|
||||
- dolphinscheduler-zookeeper:/bitnami/zookeeper
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-api:
|
||||
image: apache/dolphinscheduler:latest
|
||||
container_name: dolphinscheduler-api
|
||||
command: ["api-server"]
|
||||
command: api-server
|
||||
ports:
|
||||
- 12345:12345
|
||||
environment:
|
||||
@ -62,6 +64,9 @@ services:
|
||||
DATABASE_PASSWORD: root
|
||||
DATABASE_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
RESOURCE_STORAGE_TYPE: HDFS
|
||||
RESOURCE_UPLOAD_PATH: /dolphinscheduler
|
||||
FS_DEFAULT_FS: file:///
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
|
||||
interval: 30s
|
||||
@ -72,37 +77,16 @@ services:
|
||||
- dolphinscheduler-postgresql
|
||||
- dolphinscheduler-zookeeper
|
||||
volumes:
|
||||
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-frontend:
|
||||
image: apache/dolphinscheduler:latest
|
||||
container_name: dolphinscheduler-frontend
|
||||
command: ["frontend"]
|
||||
ports:
|
||||
- 8888:8888
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
|
||||
FRONTEND_API_SERVER_PORT: 12345
|
||||
healthcheck:
|
||||
test: ["CMD", "nc", "-z", "localhost", "8888"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
- dolphinscheduler-api
|
||||
volumes:
|
||||
- ./dolphinscheduler-logs:/var/log/nginx
|
||||
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
- dolphinscheduler-resource-local:/dolphinscheduler
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-alert:
|
||||
image: apache/dolphinscheduler:latest
|
||||
container_name: dolphinscheduler-alert
|
||||
command: ["alert-server"]
|
||||
command: alert-server
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
XLS_FILE_PATH: "/tmp/xls"
|
||||
@ -133,14 +117,15 @@ services:
|
||||
depends_on:
|
||||
- dolphinscheduler-postgresql
|
||||
volumes:
|
||||
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-master:
|
||||
image: apache/dolphinscheduler:latest
|
||||
container_name: dolphinscheduler-master
|
||||
command: ["master-server"]
|
||||
command: master-server
|
||||
ports:
|
||||
- 5678:5678
|
||||
environment:
|
||||
@ -168,14 +153,15 @@ services:
|
||||
- dolphinscheduler-postgresql
|
||||
- dolphinscheduler-zookeeper
|
||||
volumes:
|
||||
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
dolphinscheduler-worker:
|
||||
image: apache/dolphinscheduler:latest
|
||||
container_name: dolphinscheduler-worker
|
||||
command: ["worker-server"]
|
||||
command: worker-server
|
||||
ports:
|
||||
- 1234:1234
|
||||
- 50051:50051
|
||||
@ -188,30 +174,40 @@ services:
|
||||
WORKER_RESERVED_MEMORY: "0.1"
|
||||
WORKER_GROUP: "default"
|
||||
WORKER_WEIGHT: "100"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
|
||||
XLS_FILE_PATH: "/tmp/xls"
|
||||
MAIL_SERVER_HOST: ""
|
||||
MAIL_SERVER_PORT: ""
|
||||
MAIL_SENDER: ""
|
||||
MAIL_USER: ""
|
||||
MAIL_PASSWD: ""
|
||||
MAIL_SMTP_STARTTLS_ENABLE: "false"
|
||||
MAIL_SMTP_SSL_ENABLE: "false"
|
||||
MAIL_SMTP_SSL_TRUST: ""
|
||||
DATABASE_HOST: dolphinscheduler-postgresql
|
||||
DATABASE_PORT: 5432
|
||||
DATABASE_USERNAME: root
|
||||
DATABASE_PASSWORD: root
|
||||
DATABASE_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
RESOURCE_STORAGE_TYPE: HDFS
|
||||
RESOURCE_UPLOAD_PATH: /dolphinscheduler
|
||||
FS_DEFAULT_FS: file:///
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
depends_on:
|
||||
depends_on:
|
||||
- dolphinscheduler-postgresql
|
||||
- dolphinscheduler-zookeeper
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./dolphinscheduler_env.sh
|
||||
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
|
||||
- type: volume
|
||||
source: dolphinscheduler-worker-data
|
||||
target: /tmp/dolphinscheduler
|
||||
- ./dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
- ./dolphinscheduler_env.sh:/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
|
||||
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
|
||||
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
- dolphinscheduler-resource-local:/dolphinscheduler
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
|
||||
@ -224,7 +220,5 @@ volumes:
|
||||
dolphinscheduler-postgresql-initdb:
|
||||
dolphinscheduler-zookeeper:
|
||||
dolphinscheduler-worker-data:
|
||||
|
||||
configs:
|
||||
dolphinscheduler-worker-task-env:
|
||||
file: ./dolphinscheduler_env.sh
|
||||
dolphinscheduler-logs:
|
||||
dolphinscheduler-resource-local:
|
@ -20,13 +20,13 @@ services:
|
||||
|
||||
dolphinscheduler-postgresql:
|
||||
image: bitnami/postgresql:latest
|
||||
ports:
|
||||
- 5432:5432
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
POSTGRESQL_USERNAME: root
|
||||
POSTGRESQL_PASSWORD: root
|
||||
POSTGRESQL_DATABASE: dolphinscheduler
|
||||
ports:
|
||||
- 5432:5432
|
||||
volumes:
|
||||
- dolphinscheduler-postgresql:/bitnami/postgresql
|
||||
networks:
|
||||
@ -37,12 +37,12 @@ services:
|
||||
|
||||
dolphinscheduler-zookeeper:
|
||||
image: bitnami/zookeeper:latest
|
||||
ports:
|
||||
- 2181:2181
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
ALLOW_ANONYMOUS_LOGIN: "yes"
|
||||
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
|
||||
ports:
|
||||
- 2181:2181
|
||||
volumes:
|
||||
- dolphinscheduler-zookeeper:/bitnami/zookeeper
|
||||
networks:
|
||||
@ -53,7 +53,9 @@ services:
|
||||
|
||||
dolphinscheduler-api:
|
||||
image: apache/dolphinscheduler:latest
|
||||
command: ["api-server"]
|
||||
command: api-server
|
||||
ports:
|
||||
- 12345:12345
|
||||
environment:
|
||||
TZ: Asia/Shanghai
|
||||
DATABASE_HOST: dolphinscheduler-postgresql
|
||||
@ -62,39 +64,17 @@ services:
|
||||
DATABASE_PASSWORD: root
|
||||
DATABASE_DATABASE: dolphinscheduler
|
||||
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
|
||||
ports:
|
||||
- 12345:12345
|
||||
RESOURCE_STORAGE_TYPE: HDFS
|
||||
RESOURCE_UPLOAD_PATH: /dolphinscheduler
|
||||
FS_DEFAULT_FS: file:///
|
||||
healthcheck:
|
||||
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
|
||||
interval: 30
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
|
||||
networks:
|
||||
- dolphinscheduler
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
|
||||
dolphinscheduler-frontend:
|
||||
image: apache/dolphinscheduler:latest
command: ["frontend"]
ports:
- 8888:8888
environment:
TZ: Asia/Shanghai
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
FRONTEND_API_SERVER_PORT: 12345
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "8888"]
interval: 30
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
deploy:
@ -103,7 +83,7 @@ services:

dolphinscheduler-alert:
image: apache/dolphinscheduler:latest
command: ["alert-server"]
command: alert-server
environment:
TZ: Asia/Shanghai
XLS_FILE_PATH: "/tmp/xls"
@ -127,13 +107,13 @@ services:
DATABASE_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
networks:
- dolphinscheduler
deploy:
mode: replicated
@ -141,10 +121,10 @@ services:

dolphinscheduler-master:
image: apache/dolphinscheduler:latest
command: ["master-server"]
ports:
command: master-server
ports:
- 5678:5678
environment:
environment:
TZ: Asia/Shanghai
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
@ -161,7 +141,7 @@ services:
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
interval: 30
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
@ -175,11 +155,11 @@ services:

dolphinscheduler-worker:
image: apache/dolphinscheduler:latest
command: ["worker-server"]
ports:
command: worker-server
ports:
- 1234:1234
- 50051:50051
environment:
environment:
TZ: Asia/Shanghai
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
@ -188,25 +168,37 @@ services:
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
WORKER_WEIGHT: "100"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: /tmp/dolphinscheduler
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: "false"
MAIL_SMTP_SSL_ENABLE: "false"
MAIL_SMTP_SSL_TRUST: ""
DATABASE_HOST: dolphinscheduler-postgresql
DATABASE_PORT: 5432
DATABASE_USERNAME: root
DATABASE_PASSWORD: root
DATABASE_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
RESOURCE_STORAGE_TYPE: HDFS
RESOURCE_UPLOAD_PATH: /dolphinscheduler
FS_DEFAULT_FS: file:///
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
interval: 30
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
volumes:
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
configs:
- source: dolphinscheduler-worker-task-env
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
volumes:
- dolphinscheduler-worker-data:/tmp/dolphinscheduler
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
deploy:
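Worth noting in the hunks above: the healthcheck `interval` values move from the bare `30` to the unit-suffixed `30s`, and the `command` entries change from list form (`["master-server"]`) to plain strings. As a hedged aside (not part of the original diff), edits like these can be sanity-checked locally with Compose's built-in validator before deploying; the file name below is an assumption, so adjust it to the actual stack file under `docker/docker-swarm`.

```bash
# Validate the merged compose/stack file before deploying (file name assumed).
docker-compose -f docker-compose.yml config --quiet && echo "compose file OK"

# For a swarm deployment, the validated file can then be rolled out with, e.g.:
# docker stack deploy --compose-file docker-compose.yml dolphinscheduler
```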
12
docker/docker-swarm/dolphinscheduler_env.sh
Normal file → Executable file
@ -15,6 +15,14 @@
# limitations under the License.
#

export PYTHON_HOME=/usr/bin/python2
export HADOOP_HOME=/opt/soft/hadoop
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
export SPARK_HOME1=/opt/soft/spark1
export SPARK_HOME2=/opt/soft/spark2
export PYTHON_HOME=/usr/bin/python
export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
export HIVE_HOME=/opt/soft/hive
export FLINK_HOME=/opt/soft/flink
export DATAX_HOME=/opt/soft/datax/bin/datax.py

export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH
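The `dolphinscheduler_env.sh` hunk above now exports `DATAX_HOME` and appends `FLINK_HOME` and `DATAX_HOME` to `PATH`. As a rough sketch (not part of the commit), the exports can be spot-checked by sourcing the file; the `/opt/soft/...` directories are the defaults shown above and will not exist on an arbitrary host, so this is best run inside the worker image.

```bash
# Source the env file and print what it resolves to (illustrative check only).
source docker/docker-swarm/dolphinscheduler_env.sh
echo "DATAX_HOME=$DATAX_HOME"
echo "PATH=$PATH"
```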
@ -22,7 +22,7 @@ home: https://dolphinscheduler.apache.org
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
keywords:
- dolphinscheduler
- Scheduler
- scheduler
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
@ -35,18 +35,18 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 1.0.0
version: 1.2.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.3.0
appVersion: 1.4.0

dependencies:
- name: postgresql
version: 8.x.x
version: 10.x.x
repository: https://charts.bitnami.com/bitnami
condition: postgresql.enabled
- name: zookeeper
version: 5.x.x
version: 6.x.x
repository: https://charts.bitnami.com/bitnami
condition: zookeeper.enabled
@ -7,19 +7,20 @@ This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org)

## Prerequisites

- Kubernetes 1.10+
- Helm 3.1.0+
- Kubernetes 1.12+
- PV provisioner support in the underlying infrastructure

## Installing the Chart

To install the chart with the release name `my-release`:
To install the chart with the release name `dolphinscheduler`:

```bash
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
$ cd incubator-dolphinscheduler/kubernetes/dolphinscheduler
$ cd incubator-dolphinscheduler/docker/kubernetes/dolphinscheduler
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm dependency update .
$ helm install --name dolphinscheduler .
$ helm install dolphinscheduler .
```
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
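As an illustrative aside (not part of the chart's README), individual parameters from the configuration section can be overridden at install time with `--set`; the keys below are taken from the chart's `values.yaml` and parameter table in this commit, so double-check them against your chart version before relying on them.

```bash
# Example install with a few documented parameters overridden (values are illustrative).
$ helm install dolphinscheduler . \
    --set master.replicas=3 \
    --set worker.replicas=3 \
    --set postgresql.enabled=false \
    --set externalDatabase.host=postgres.example.com
```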
@ -30,7 +31,7 @@ These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default
To uninstall/delete the `dolphinscheduler` deployment:

```bash
$ helm delete --purge dolphinscheduler
$ helm uninstall dolphinscheduler
```

The command removes all the Kubernetes components associated with the chart and deletes the release.
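One caveat worth adding here as a hedged note: persistent volume claims created through the chart's `volumeClaimTemplates` (for example the master, worker data and logs volumes) are typically left behind by `helm uninstall` and have to be removed separately. A sketch, assuming the chart's `app.kubernetes.io/instance` label and the release name `dolphinscheduler`:

```bash
# List, then delete, PVCs left behind after the release is uninstalled
# (label selector is an assumption based on the chart's templates; verify first).
$ kubectl get pvc -l app.kubernetes.io/instance=dolphinscheduler
$ kubectl delete pvc -l app.kubernetes.io/instance=dolphinscheduler
```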
@ -220,32 +221,6 @@ The following tables lists the configurable parameters of the Dolphins Scheduler
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
| `frontend.resources` | The `resource` limit and request config for frontend server. | `{}` |
| `frontend.annotations` | The `annotations` for frontend server. | `{}` |
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
| `frontend.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
| `frontend.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
| | | |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
| `ingress.path` | Ingress path | `/` |
@ -1,25 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
@ -1,25 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
@ -1,25 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
@ -1,25 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
@ -1,25 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: redis.enabled
|
@ -15,9 +15,9 @@
# limitations under the License.
#

** Please be patient while the chart is being deployed **
** Please be patient while the chart Dolphinscheduler {{ .Chart.AppVersion }} is being deployed **

1. Get the Dolphinscheduler URL by running:
Get the Dolphinscheduler URL by running:

{{- if .Values.ingress.enabled }}

@ -26,6 +26,6 @@

{{- else }}

kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-api 12345:12345

{{- end }}
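The NOTES.txt change above swaps the frontend port-forward (8888) for the api service on 12345. As a small hedged check (not part of the template), once the port-forward is running in another terminal you can confirm the forwarded port answers at all; no particular context path is assumed here, since the exact URL prefix may differ by version.

```bash
# With `kubectl port-forward ... 12345:12345` running, confirm the api service responds.
curl -sS -o /dev/null -w 'HTTP %{http_code}\n' http://localhost:12345/
```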
@ -135,7 +135,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
|
||||
Create a default dolphinscheduler worker base dir.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.worker.base.dir" -}}
|
||||
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
|
||||
{{- define "dolphinscheduler.data.basedir.path" -}}
|
||||
{{- $name := default "/tmp/dolphinscheduler" .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
|
||||
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
|
||||
{{- end -}}
|
@ -24,6 +24,7 @@ metadata:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
DOLPHINSCHEDULER_OPTS: {{ .Values.alert.configmap.DOLPHINSCHEDULER_OPTS | quote }}
|
||||
ALERT_PLUGIN_DIR: {{ .Values.alert.configmap.ALERT_PLUGIN_DIR | quote }}
|
||||
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
|
||||
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
|
||||
|
@ -14,22 +14,15 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.api.configmap }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 8888
|
||||
targetPort: tcp-port
|
||||
protocol: TCP
|
||||
name: tcp-port
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
data:
|
||||
DOLPHINSCHEDULER_OPTS: {{ .Values.api.configmap.DOLPHINSCHEDULER_OPTS | quote }}
|
||||
{{- end }}
|
@ -24,12 +24,14 @@ metadata:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
DOLPHINSCHEDULER_ENV_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_ENV_PATH | quote }}
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ .Values.common.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH | quote }}
|
||||
DOLPHINSCHEDULER_ENV: |-
|
||||
{{- range .Values.common.configmap.DOLPHINSCHEDULER_ENV }}
|
||||
{{ . }}
|
||||
{{- end }}
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.data.basedir.path" . | quote }}
|
||||
RESOURCE_STORAGE_TYPE: {{ .Values.common.configmap.RESOURCE_STORAGE_TYPE | quote }}
|
||||
RESOURCE_UPLOAD_PATH: {{ .Values.common.configmap.RESOURCE_UPLOAD_PATH | quote }}
|
||||
FS_DEFAULT_FS: {{ .Values.common.configmap.FS_DEFAULT_FS | quote }}
|
||||
FS_S3A_ENDPOINT: {{ .Values.common.configmap.FS_S3A_ENDPOINT | quote }}
|
||||
FS_S3A_ACCESS_KEY: {{ .Values.common.configmap.FS_S3A_ACCESS_KEY | quote }}
|
||||
FS_S3A_SECRET_KEY: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | quote }}
|
||||
{{- end }}
|
@ -24,6 +24,7 @@ metadata:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
DOLPHINSCHEDULER_OPTS: {{ .Values.master.configmap.DOLPHINSCHEDULER_OPTS | quote }}
|
||||
MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
|
||||
MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
|
||||
MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
|
||||
@ -32,5 +33,4 @@ data:
|
||||
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
|
||||
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
|
||||
MASTER_LISTEN_PORT: {{ .Values.master.configmap.MASTER_LISTEN_PORT | quote }}
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
|
||||
{{- end }}
|
@ -24,17 +24,12 @@ metadata:
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
DOLPHINSCHEDULER_OPTS: {{ .Values.worker.configmap.DOLPHINSCHEDULER_OPTS | quote }}
|
||||
WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
|
||||
WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
|
||||
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
|
||||
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
|
||||
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
|
||||
WORKER_LISTEN_PORT: {{ .Values.worker.configmap.WORKER_LISTEN_PORT | quote }}
|
||||
WORKER_GROUP: {{ .Values.worker.configmap.WORKER_GROUP | quote }}
|
||||
WORKER_WEIGHT: {{ .Values.worker.configmap.WORKER_WEIGHT | quote }}
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
|
||||
dolphinscheduler_env.sh: |-
|
||||
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
|
||||
{{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
@ -57,35 +57,6 @@ spec:
|
||||
{{- if .Values.alert.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-database
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to database."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: DATABASE_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: DATABASE_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecrets }}
|
||||
imagePullSecrets:
|
||||
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
|
||||
@ -93,14 +64,17 @@ spec:
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args:
|
||||
- "alert-server"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
value: {{ default "" .Values.alert.jvmOptions }}
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: DOLPHINSCHEDULER_OPTS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: ALERT_PLUGIN_DIR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@ -228,36 +202,6 @@ spec:
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.params | quote }}
|
||||
{{- end }}
|
||||
- name: RESOURCE_STORAGE_TYPE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: RESOURCE_STORAGE_TYPE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: RESOURCE_UPLOAD_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: RESOURCE_UPLOAD_PATH
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_DEFAULT_FS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_DEFAULT_FS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_ENDPOINT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_ENDPOINT
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_ACCESS_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_ACCESS_KEY
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_SECRET_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_SECRET_KEY
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
{{- if .Values.alert.resources }}
|
||||
resources:
|
||||
limits:
|
||||
|
@ -57,35 +57,6 @@ spec:
|
||||
{{- if .Values.api.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-database
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to database."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: DATABASE_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: DATABASE_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecrets }}
|
||||
imagePullSecrets:
|
||||
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
|
||||
@ -93,17 +64,20 @@ spec:
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args:
|
||||
- "api-server"
|
||||
ports:
|
||||
- containerPort: 12345
|
||||
name: tcp-port
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
value: {{ default "" .Values.api.jvmOptions }}
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: DOLPHINSCHEDULER_OPTS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
- name: DATABASE_TYPE
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "postgresql"
|
||||
@ -164,7 +138,7 @@ spec:
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_ROOT
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "/dolphinscheduler"
|
||||
value: {{ .Values.zookeeper.zookeeperRoot }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperRoot }}
|
||||
{{- end }}
|
||||
@ -183,6 +157,7 @@ spec:
|
||||
configMapKeyRef:
|
||||
key: FS_DEFAULT_FS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
|
||||
- name: FS_S3A_ENDPOINT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@ -195,9 +170,10 @@ spec:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_SECRET_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_SECRET_KEY
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
secretKeyRef:
|
||||
key: fs-s3a-secret-key
|
||||
name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
|
||||
{{- end }}
|
||||
{{- if .Values.api.resources }}
|
||||
resources:
|
||||
limits:
|
||||
|
@ -1,119 +0,0 @@
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
spec:
|
||||
replicas: {{ .Values.frontend.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
strategy:
|
||||
type: {{ .Values.frontend.strategy.type | quote }}
|
||||
rollingUpdate:
|
||||
maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
|
||||
maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
{{- if .Values.alert.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.alert.annotations | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.frontend.affinity }}
|
||||
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.frontend.nodeSelector }}
|
||||
nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.frontend.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecrets }}
|
||||
imagePullSecrets:
|
||||
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
args:
|
||||
- "frontend"
|
||||
ports:
|
||||
- containerPort: 8888
|
||||
name: tcp-port
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: FRONTEND_API_SERVER_HOST
|
||||
value: '{{ include "dolphinscheduler.fullname" . }}-api'
|
||||
- name: FRONTEND_API_SERVER_PORT
|
||||
value: "12345"
|
||||
{{- if .Values.frontend.resources }}
|
||||
resources:
|
||||
limits:
|
||||
memory: {{ .Values.frontend.resources.limits.memory | quote }}
|
||||
cpu: {{ .Values.frontend.resources.limits.cpu | quote }}
|
||||
requests:
|
||||
memory: {{ .Values.frontend.resources.requests.memory | quote }}
|
||||
cpu: {{ .Values.frontend.resources.requests.cpu | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.frontend.livenessProbe.enabled }}
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 8888
|
||||
initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.frontend.readinessProbe.enabled }}
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 8888
|
||||
initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: "/var/log/nginx"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
volumes:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
@ -30,7 +30,7 @@ spec:
|
||||
paths:
|
||||
- path: {{ .Values.ingress.path }}
|
||||
backend:
|
||||
serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
serviceName: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
servicePort: tcp-port
|
||||
{{- if .Values.ingress.tls.enabled }}
|
||||
tls:
|
||||
|
@ -25,7 +25,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
|
||||
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}
|
||||
|
@ -25,7 +25,7 @@ metadata:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.api.persistentVolumeClaim.accessModes }}
|
||||
{{- range .Values.api.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}
|
||||
|
@ -14,22 +14,16 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
|
||||
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-fs-s3a
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.frontend.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
|
||||
type: Opaque
|
||||
data:
|
||||
fs-s3a-secret-key: {{ .Values.common.configmap.FS_S3A_SECRET_KEY | b64enc | quote }}
|
||||
{{- end }}
|
@ -54,59 +54,6 @@ spec:
|
||||
{{- if .Values.master.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-zookeeper
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
|
||||
while ! nc -z ${line%:*} ${line#*:}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to zookeeper."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
done
|
||||
env:
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
|
||||
{{- end }}
|
||||
- name: init-database
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to database."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: DATABASE_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: DATABASE_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecrets }}
|
||||
imagePullSecrets:
|
||||
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
|
||||
@ -114,17 +61,20 @@ spec:
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args:
|
||||
- "master-server"
|
||||
ports:
|
||||
- containerPort: {{ .Values.master.configmap.MASTER_LISTEN_PORT }}
|
||||
name: "master-port"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
value: {{ default "" .Values.master.jvmOptions }}
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: DOLPHINSCHEDULER_OPTS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
- name: MASTER_EXEC_THREADS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@ -168,7 +118,7 @@ spec:
|
||||
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
- name: DATABASE_TYPE
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
@ -230,40 +180,10 @@ spec:
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_ROOT
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "/dolphinscheduler"
|
||||
value: {{ .Values.zookeeper.zookeeperRoot }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperRoot }}
|
||||
{{- end }}
|
||||
- name: RESOURCE_STORAGE_TYPE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: RESOURCE_STORAGE_TYPE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: RESOURCE_UPLOAD_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: RESOURCE_UPLOAD_PATH
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_DEFAULT_FS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_DEFAULT_FS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_ENDPOINT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_ENDPOINT
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_ACCESS_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_ACCESS_KEY
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_SECRET_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_SECRET_KEY
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
{{- if .Values.master.resources }}
|
||||
resources:
|
||||
limits:
|
||||
|
@ -54,59 +54,6 @@ spec:
|
||||
{{- if .Values.worker.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-zookeeper
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
|
||||
while ! nc -z ${line%:*} ${line#*:}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to zookeeper."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
done
|
||||
env:
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
|
||||
{{- end }}
|
||||
- name: init-database
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to database."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to database at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: DATABASE_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: DATABASE_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecrets }}
|
||||
imagePullSecrets:
|
||||
- name: {{ include "dolphinscheduler.image.pullSecrets" . }}
|
||||
@ -114,29 +61,27 @@ spec:
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
args:
|
||||
- "worker-server"
|
||||
ports:
|
||||
- containerPort: {{ .Values.worker.configmap.WORKER_LISTEN_PORT }}
|
||||
name: "worker-port"
|
||||
- containerPort: 50051
|
||||
name: "logs-port"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
name: "logger-port"
|
||||
env:
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
value: {{ default "" .Values.worker.jvmOptions }}
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: DOLPHINSCHEDULER_OPTS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: DOLPHINSCHEDULER_OPTS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
- name: WORKER_EXEC_THREADS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_EXEC_THREADS
|
||||
- name: WORKER_FETCH_TASK_NUM
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_FETCH_TASK_NUM
|
||||
- name: WORKER_HEARTBEAT_INTERVAL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@ -170,8 +115,58 @@ spec:
|
||||
- name: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
key: DOLPHINSCHEDULER_DATA_BASEDIR_PATH
|
||||
- name: ALERT_PLUGIN_DIR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ALERT_PLUGIN_DIR
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: XLS_FILE_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: XLS_FILE_PATH
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SERVER_HOST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SERVER_HOST
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SERVER_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SERVER_PORT
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SENDER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SENDER
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_USER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_USER
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_PASSWD
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_PASSWD
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_STARTTLS_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_STARTTLS_ENABLE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_SSL_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_SSL_ENABLE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_SSL_TRUST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_SSL_TRUST
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: DATABASE_TYPE
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "postgresql"
|
||||
@ -232,7 +227,7 @@ spec:
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_ROOT
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "/dolphinscheduler"
|
||||
value: {{ .Values.zookeeper.zookeeperRoot }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperRoot }}
|
||||
{{- end }}
|
||||
@ -251,6 +246,7 @@ spec:
|
||||
configMapKeyRef:
|
||||
key: FS_DEFAULT_FS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
{{- if eq .Values.common.configmap.RESOURCE_STORAGE_TYPE "S3" }}
|
||||
- name: FS_S3A_ENDPOINT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@ -263,54 +259,10 @@ spec:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: FS_S3A_SECRET_KEY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: FS_S3A_SECRET_KEY
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
- name: XLS_FILE_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: XLS_FILE_PATH
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SERVER_HOST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SERVER_HOST
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SERVER_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SERVER_PORT
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SENDER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SENDER
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_USER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_USER
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_PASSWD
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_PASSWD
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_STARTTLS_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_STARTTLS_ENABLE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_SSL_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_SSL_ENABLE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_SSL_TRUST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_SSL_TRUST
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
secretKeyRef:
|
||||
key: fs-s3a-secret-key
|
||||
name: {{ printf "%s-%s" .Release.Name "fs-s3a" }}
|
||||
{{- end }}
|
||||
- name: ENTERPRISE_WECHAT_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
@ -372,13 +324,13 @@ spec:
|
||||
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
|
||||
- mountPath: {{ include "dolphinscheduler.data.basedir.path" . | quote }}
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
|
||||
- mountPath: "/opt/dolphinscheduler/logs"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
|
||||
- mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
|
||||
subPath: "dolphinscheduler_env.sh"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common-env
|
||||
volumes:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-worker-data
|
||||
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
|
||||
@ -394,12 +346,12 @@ spec:
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-common-env
|
||||
configMap:
|
||||
defaultMode: 0777
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-common
|
||||
items:
|
||||
- key: dolphinscheduler_env.sh
|
||||
- key: DOLPHINSCHEDULER_ENV
|
||||
path: dolphinscheduler_env.sh
|
||||
{{- if .Values.worker.persistentVolumeClaim.enabled }}
|
||||
volumeClaimTemplates:
|
||||
|
@ -30,9 +30,9 @@ spec:
|
||||
protocol: TCP
|
||||
name: worker-port
|
||||
- port: 50051
|
||||
targetPort: logs-port
|
||||
targetPort: logger-port
|
||||
protocol: TCP
|
||||
name: logs-port
|
||||
name: logger-port
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
|
@ -58,62 +58,74 @@ externalDatabase:
|
||||
# If not exists external zookeeper, by default, Dolphinscheduler's zookeeper will use it.
|
||||
zookeeper:
|
||||
enabled: true
|
||||
taskQueue: "zookeeper"
|
||||
config: null
|
||||
fourlwCommandsWhitelist: srvr,ruok,wchs,cons
|
||||
service:
|
||||
port: "2181"
|
||||
persistence:
|
||||
enabled: false
|
||||
size: "20Gi"
|
||||
storageClass: "-"
|
||||
zookeeperRoot: "/dolphinscheduler"
|
||||
|
||||
# If exists external zookeeper, and set zookeeper.enable value to false.
|
||||
# If zookeeper.enable is false, Dolphinscheduler's zookeeper will use it.
|
||||
# If zookeeper.enable is false, Dolphinscheduler's zookeeper will use it.
|
||||
externalZookeeper:
|
||||
taskQueue: "zookeeper"
|
||||
zookeeperQuorum: "127.0.0.1:2181"
|
||||
zookeeperRoot: "/dolphinscheduler"
|
||||
|
||||
common:
|
||||
## ConfigMap
|
||||
configmap:
|
||||
DOLPHINSCHEDULER_ENV_PATH: "/tmp/dolphinscheduler/env"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler/files"
|
||||
RESOURCE_STORAGE_TYPE: "NONE"
|
||||
RESOURCE_UPLOAD_PATH: "/ds"
|
||||
FS_DEFAULT_FS: "s3a://xxxx"
|
||||
DOLPHINSCHEDULER_ENV:
|
||||
- "export HADOOP_HOME=/opt/soft/hadoop"
|
||||
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
|
||||
- "export SPARK_HOME1=/opt/soft/spark1"
|
||||
- "export SPARK_HOME2=/opt/soft/spark2"
|
||||
- "export PYTHON_HOME=/usr/bin/python"
|
||||
- "export JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk"
|
||||
- "export HIVE_HOME=/opt/soft/hive"
|
||||
- "export FLINK_HOME=/opt/soft/flink"
|
||||
- "export DATAX_HOME=/opt/soft/datax/bin/datax.py"
|
||||
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
|
||||
RESOURCE_STORAGE_TYPE: "HDFS"
|
||||
RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
|
||||
FS_DEFAULT_FS: "file:///"
|
||||
FS_S3A_ENDPOINT: "s3.xxx.amazonaws.com"
|
||||
FS_S3A_ACCESS_KEY: "xxxxxxx"
|
||||
FS_S3A_SECRET_KEY: "xxxxxxx"
|
||||
|
||||
master:
|
||||
## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
|
||||
podManagementPolicy: "Parallel"
|
||||
## Replicas is the desired number of replicas of the given Template.
|
||||
replicas: "3"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
# The jvm options for java instance startup
|
||||
jvmOptions: ""
|
||||
resources: {}
|
||||
# limits:
|
||||
# memory: "18Gi"
|
||||
# cpu: "4"
|
||||
# requests:
|
||||
# memory: "2Gi"
|
||||
# cpu: "500m"
|
||||
# You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
# Clients such as tools and libraries can retrieve this metadata.
|
||||
## You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
## Clients such as tools and libraries can retrieve this metadata.
|
||||
annotations: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
|
||||
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
## NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
## Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
## Compute Resources required by this container. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
|
||||
resources: {}
|
||||
# resources:
|
||||
# limits:
|
||||
# memory: "8Gi"
|
||||
# cpu: "4"
|
||||
# requests:
|
||||
# memory: "2Gi"
|
||||
# cpu: "500m"
|
||||
## ConfigMap
|
||||
configmap:
|
||||
DOLPHINSCHEDULER_OPTS: ""
|
||||
MASTER_EXEC_THREADS: "100"
|
||||
MASTER_EXEC_TASK_NUM: "20"
|
||||
MASTER_HEARTBEAT_INTERVAL: "10"
|
||||
@ -122,6 +134,8 @@ master:
|
||||
MASTER_MAX_CPULOAD_AVG: "100"
|
||||
MASTER_RESERVED_MEMORY: "0.1"
|
||||
MASTER_LISTEN_PORT: "5678"
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
@ -138,7 +152,7 @@ master:
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
@ -150,31 +164,43 @@ master:
|
||||
storage: "20Gi"
|
||||
|
||||
worker:
|
||||
## PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down.
|
||||
podManagementPolicy: "Parallel"
|
||||
## Replicas is the desired number of replicas of the given Template.
|
||||
replicas: "3"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
# The jvm options for java instance startup
|
||||
jvmOptions: ""
|
||||
resources: {}
|
||||
# limits:
|
||||
# memory: "18Gi"
|
||||
# cpu: "4"
|
||||
# requests:
|
||||
# memory: "2Gi"
|
||||
# cpu: "500m"
|
||||
# You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
# Clients such as tools and libraries can retrieve this metadata.
|
||||
## You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
## Clients such as tools and libraries can retrieve this metadata.
|
||||
annotations: {}
|
||||
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
|
||||
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
## NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
## Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
## Compute Resources required by this container. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
|
||||
resources: {}
|
||||
# resources:
|
||||
# limits:
|
||||
# memory: "8Gi"
|
||||
# cpu: "4"
|
||||
# requests:
|
||||
# memory: "2Gi"
|
||||
# cpu: "500m"
|
||||
## ConfigMap
|
||||
configmap:
|
||||
DOLPHINSCHEDULER_OPTS: ""
|
||||
WORKER_EXEC_THREADS: "100"
|
||||
WORKER_HEARTBEAT_INTERVAL: "10"
|
||||
WORKER_MAX_CPULOAD_AVG: "100"
|
||||
WORKER_RESERVED_MEMORY: "0.1"
|
||||
WORKER_LISTEN_PORT: "1234"
|
||||
WORKER_GROUP: "default"
|
||||
WORKER_WEIGHT: "100"
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
@ -193,27 +219,7 @@ worker:
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
configmap:
|
||||
WORKER_EXEC_THREADS: "100"
|
||||
WORKER_HEARTBEAT_INTERVAL: "10"
|
||||
WORKER_FETCH_TASK_NUM: "3"
|
||||
WORKER_MAX_CPULOAD_AVG: "100"
|
||||
WORKER_RESERVED_MEMORY: "0.1"
|
||||
WORKER_LISTEN_PORT: "1234"
|
||||
WORKER_GROUP: "default"
|
||||
WORKER_WEIGHT: "100"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
|
||||
DOLPHINSCHEDULER_ENV:
|
||||
- "export HADOOP_HOME=/opt/soft/hadoop"
|
||||
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
|
||||
- "export SPARK_HOME1=/opt/soft/spark1"
|
||||
- "export SPARK_HOME2=/opt/soft/spark2"
|
||||
- "export PYTHON_HOME=/opt/soft/python"
|
||||
- "export JAVA_HOME=/opt/soft/java"
|
||||
- "export HIVE_HOME=/opt/soft/hive"
|
||||
- "export FLINK_HOME=/opt/soft/flink"
|
||||
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
@ -235,38 +241,40 @@ worker:
|
||||
storage: "20Gi"
|
||||
|
||||
alert:
|
||||
## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
|
||||
replicas: "1"
|
||||
## The deployment strategy to use to replace existing pods with new ones.
|
||||
strategy:
|
||||
type: "RollingUpdate"
|
||||
rollingUpdate:
|
||||
maxSurge: "25%"
|
||||
maxUnavailable: "25%"
|
||||
replicas: "1"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
# The jvm options for java instance startup
|
||||
jvmOptions: ""
|
||||
resources: {}
|
||||
# limits:
|
||||
# memory: "4Gi"
|
||||
# cpu: "1"
|
||||
# requests:
|
||||
# memory: "2Gi"
|
||||
# cpu: "500m"
|
||||
# You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
# Clients such as tools and libraries can retrieve this metadata.
|
||||
## You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
## Clients such as tools and libraries can retrieve this metadata.
|
||||
annotations: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
## NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
## Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
affinity: {}
|
||||
## Compute Resources required by this container. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
|
||||
nodeSelector: {}
|
||||
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
|
||||
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
resources: {}
|
||||
# resources:
|
||||
# limits:
|
||||
# memory: "2Gi"
|
||||
# cpu: "1"
|
||||
# requests:
|
||||
# memory: "1Gi"
|
||||
# cpu: "500m"
|
||||
## ConfigMap
|
||||
configmap:
|
||||
DOLPHINSCHEDULER_OPTS: ""
|
||||
ALERT_PLUGIN_DIR: "/opt/dolphinscheduler/alert/plugin"
|
||||
XLS_FILE_PATH: "/tmp/xls"
|
||||
MAIL_SERVER_HOST: ""
|
||||
@ -282,6 +290,8 @@ alert:
|
||||
ENTERPRISE_WECHAT_SECRET: ""
|
||||
ENTERPRISE_WECHAT_AGENT_ID: ""
|
||||
ENTERPRISE_WECHAT_USERS: ""
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
@ -298,10 +308,8 @@ alert:
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
|
||||
## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
accessModes:
|
||||
@ -310,92 +318,40 @@ alert:
|
||||
storage: "20Gi"
|
||||
|
||||
api:
|
||||
## Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.
|
||||
replicas: "1"
|
||||
## The deployment strategy to use to replace existing pods with new ones.
|
||||
strategy:
|
||||
type: "RollingUpdate"
|
||||
rollingUpdate:
|
||||
maxSurge: "25%"
|
||||
maxUnavailable: "25%"
|
||||
replicas: "1"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
# The jvm options for java instance startup
|
||||
jvmOptions: ""
|
||||
resources: {}
|
||||
# limits:
|
||||
# memory: "4Gi"
|
||||
# cpu: "2"
|
||||
# requests:
|
||||
# memory: "2Gi"
|
||||
# cpu: "500m"
|
||||
# You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
# Clients such as tools and libraries can retrieve this metadata.
|
||||
## You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
## Clients such as tools and libraries can retrieve this metadata.
|
||||
annotations: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "-"
|
||||
storage: "20Gi"
|
||||
|
||||
frontend:
|
||||
strategy:
|
||||
type: "RollingUpdate"
|
||||
rollingUpdate:
|
||||
maxSurge: "25%"
|
||||
maxUnavailable: "25%"
|
||||
replicas: "1"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
## NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
## Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
affinity: {}
|
||||
## Compute Resources required by this container. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
|
||||
nodeSelector: {}
|
||||
## Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
## effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
## Affinity is a group of affinity scheduling rules. If specified, the pod's scheduling constraints.
|
||||
## More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
resources: {}
|
||||
# limits:
|
||||
# memory: "256Mi"
|
||||
# resources:
|
||||
# limits:
|
||||
# memory: "2Gi"
|
||||
# cpu: "1"
|
||||
# requests:
|
||||
# memory: "256Mi"
|
||||
# memory: "1Gi"
|
||||
# cpu: "500m"
|
||||
# You can use annotations to attach arbitrary non-identifying metadata to objects.
|
||||
# Clients such as tools and libraries can retrieve this metadata.
|
||||
annotations: {}
|
||||
## ConfigMap
|
||||
configmap:
|
||||
DOLPHINSCHEDULER_OPTS: ""
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
@ -414,10 +370,8 @@ frontend:
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
## PersistentVolumeClaim represents a reference to a PersistentVolumeClaim in the same namespace.
|
||||
## More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
accessModes:
|
||||
|
@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;

import java.util.Map;

@ -34,8 +33,10 @@ public class DingTalkAlertChannel implements AlertChannel {
public AlertResult process(AlertInfo alertInfo) {

AlertData alertData = alertInfo.getAlertData();
String alertParams = alertInfo.getAlertParams();
Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
Map<String, String> paramsMap = alertInfo.getAlertParams();
if (null == paramsMap) {
return new AlertResult("false", "ding talk params is null");
}
return new DingTalkSender(paramsMap).sendDingTalkMsg(alertData.getTitle(), alertData.getContent());
}
}

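The hunk above shows the pattern this commit applies across the alert channels: AlertInfo.getAlertParams() now hands the channel a ready-built Map<String, String>, so the per-channel PluginParamsTransfer conversion disappears. A minimal caller-side sketch of the new contract (not part of the commit; the webhook and keyword values are placeholders, the map keys mirror how DingTalkSenderTest builds its config, and the class sits in the plugin package only so the package-private NAME_* constants resolve):

package org.apache.dolphinscheduler.plugin.alert.dingtalk;

import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;

import java.util.HashMap;
import java.util.Map;

public class DingTalkChannelUsageSketch {

    public static void main(String[] args) {
        AlertData alertData = new AlertData();
        alertData.setTitle("demo title");
        alertData.setContent("demo content");

        // Plugin settings now travel as a plain map keyed by the NAME_* constants.
        Map<String, String> params = new HashMap<>();
        params.put(DingTalkParamsConstants.NAME_DING_TALK_WEB_HOOK, "https://oapi.dingtalk.com/robot/send?access_token=placeholder");
        params.put(DingTalkParamsConstants.NAME_DING_TALK_KEYWORD, "demo");

        AlertInfo alertInfo = new AlertInfo();
        alertInfo.setAlertData(alertData);
        alertInfo.setAlertParams(params);

        AlertResult result = new DingTalkAlertChannel().process(alertInfo);
        System.out.println(result.getStatus() + " : " + result.getMessage());
    }
}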
@ -22,36 +22,24 @@ package org.apache.dolphinscheduler.plugin.alert.dingtalk;
|
||||
*/
|
||||
public class DingTalkParamsConstants {
|
||||
|
||||
|
||||
static final String DING_TALK_PROXY_ENABLE = "isEnableProxy";
|
||||
static final String DING_TALK_WEB_HOOK = "webhook";
|
||||
static final String NAME_DING_TALK_WEB_HOOK = "WebHook";
|
||||
static final String DING_TALK_KEYWORD = "keyword";
|
||||
static final String NAME_DING_TALK_KEYWORD = "Keyword";
|
||||
static final String NAME_DING_TALK_PROXY_ENABLE = "IsEnableProxy";
|
||||
static final String DING_TALK_PROXY = "proxy";
|
||||
static final String NAME_DING_TALK_PROXY = "Proxy";
|
||||
static final String DING_TALK_PORT = "port";
|
||||
static final String NAME_DING_TALK_PORT = "Port";
|
||||
static final String DING_TALK_USER = "user";
|
||||
static final String NAME_DING_TALK_USER = "User";
|
||||
static final String DING_TALK_PASSWORD = "password";
|
||||
static final String NAME_DING_TALK_PASSWORD = "Password";
|
||||
|
||||
private DingTalkParamsConstants() {
|
||||
throw new IllegalStateException("Utility class");
|
||||
}
|
||||
|
||||
static final String DING_TALK_WEB_HOOK = "dingtalk.webhook";
|
||||
|
||||
static final String NAME_DING_TALK_WEB_HOOK = "dingTalkWebHook";
|
||||
|
||||
static final String DING_TALK_KEYWORD = "dingtalk.keyword";
|
||||
|
||||
static final String NAME_DING_TALK_KEYWORD = "dingTalkKeyword";
|
||||
|
||||
public static final String DING_TALK_PROXY_ENABLE = "dingtalk.isEnableProxy";
|
||||
|
||||
static final String NAME_DING_TALK_PROXY_ENABLE = "dingTalkIsEnableProxy";
|
||||
|
||||
static final String DING_TALK_PROXY = "dingtalk.proxy";
|
||||
|
||||
static final String NAME_DING_TALK_PROXY = "dingTalkProxy";
|
||||
|
||||
static final String DING_TALK_PORT = "dingtalk.port";
|
||||
|
||||
static final String NAME_DING_TALK_PORT = "dingTalkPort";
|
||||
|
||||
static final String DING_TALK_USER = "dingtalk.user";
|
||||
|
||||
static final String NAME_DING_TALK_USER = "dingTalkUser";
|
||||
|
||||
static final String DING_TALK_PASSWORD = "dingtalk.password";
|
||||
|
||||
static final String NAME_DING_TALK_PASSWORD = "dingTalkPassword";
|
||||
|
||||
}
|
||||
|
@ -75,51 +75,6 @@ public class DingTalkSender {
|
||||
|
||||
}
|
||||
|
||||
public AlertResult sendDingTalkMsg(String msg, String charset) {
|
||||
AlertResult alertResult;
|
||||
try {
|
||||
String resp = sendMsg(msg, charset);
|
||||
return checkSendDingTalkSendMsgResult(resp);
|
||||
} catch (Exception e) {
|
||||
logger.info("send ding talk alert msg exception : {}", e.getMessage());
|
||||
alertResult = new AlertResult();
|
||||
alertResult.setStatus("false");
|
||||
alertResult.setMessage("send ding talk alert fail.");
|
||||
}
|
||||
return alertResult;
|
||||
}
|
||||
|
||||
private String sendMsg(String msg, String charset) throws IOException {
|
||||
|
||||
String msgToJson = textToJsonString(msg + "#" + keyword);
|
||||
HttpPost httpPost = constructHttpPost(url, msgToJson, charset);
|
||||
|
||||
CloseableHttpClient httpClient;
|
||||
if (Boolean.TRUE.equals(enableProxy)) {
|
||||
httpClient = getProxyClient(proxy, port, user, password);
|
||||
RequestConfig rcf = getProxyConfig(proxy, port);
|
||||
httpPost.setConfig(rcf);
|
||||
} else {
|
||||
httpClient = getDefaultClient();
|
||||
}
|
||||
|
||||
try {
|
||||
CloseableHttpResponse response = httpClient.execute(httpPost);
|
||||
String resp;
|
||||
try {
|
||||
HttpEntity entity = response.getEntity();
|
||||
resp = EntityUtils.toString(entity, charset);
|
||||
EntityUtils.consume(entity);
|
||||
} finally {
|
||||
response.close();
|
||||
}
|
||||
logger.info("Ding Talk send {}, resp: {}", msg, resp);
|
||||
return resp;
|
||||
} finally {
|
||||
httpClient.close();
|
||||
}
|
||||
}
|
||||
|
||||
private static HttpPost constructHttpPost(String url, String msg, String charset) {
|
||||
HttpPost post = new HttpPost(url);
|
||||
StringEntity entity = new StringEntity(msg, charset);
|
||||
@ -155,27 +110,6 @@ public class DingTalkSender {
|
||||
return JSONUtils.toJsonString(items);
|
||||
}
|
||||
|
||||
public static class DingTalkSendMsgResponse {
|
||||
private Integer errcode;
|
||||
private String errmsg;
|
||||
|
||||
public Integer getErrcode() {
|
||||
return errcode;
|
||||
}
|
||||
|
||||
public void setErrcode(Integer errcode) {
|
||||
this.errcode = errcode;
|
||||
}
|
||||
|
||||
public String getErrmsg() {
|
||||
return errmsg;
|
||||
}
|
||||
|
||||
public void setErrmsg(String errmsg) {
|
||||
this.errmsg = errmsg;
|
||||
}
|
||||
}
|
||||
|
||||
private static AlertResult checkSendDingTalkSendMsgResult(String result) {
|
||||
AlertResult alertResult = new AlertResult();
|
||||
alertResult.setStatus("false");
|
||||
@ -201,4 +135,70 @@ public class DingTalkSender {
|
||||
return alertResult;
|
||||
}
|
||||
|
||||
public AlertResult sendDingTalkMsg(String title, String content) {
|
||||
AlertResult alertResult;
|
||||
try {
|
||||
String resp = sendMsg(title, content);
|
||||
return checkSendDingTalkSendMsgResult(resp);
|
||||
} catch (Exception e) {
|
||||
logger.info("send ding talk alert msg exception : {}", e.getMessage());
|
||||
alertResult = new AlertResult();
|
||||
alertResult.setStatus("false");
|
||||
alertResult.setMessage("send ding talk alert fail.");
|
||||
}
|
||||
return alertResult;
|
||||
}
|
||||
|
||||
private String sendMsg(String title, String content) throws IOException {
|
||||
|
||||
String msgToJson = textToJsonString(title + content + "#" + keyword);
|
||||
HttpPost httpPost = constructHttpPost(url, msgToJson, "UTF-8");
|
||||
|
||||
CloseableHttpClient httpClient;
|
||||
if (Boolean.TRUE.equals(enableProxy)) {
|
||||
httpClient = getProxyClient(proxy, port, user, password);
|
||||
RequestConfig rcf = getProxyConfig(proxy, port);
|
||||
httpPost.setConfig(rcf);
|
||||
} else {
|
||||
httpClient = getDefaultClient();
|
||||
}
|
||||
|
||||
try {
|
||||
CloseableHttpResponse response = httpClient.execute(httpPost);
|
||||
String resp;
|
||||
try {
|
||||
HttpEntity entity = response.getEntity();
|
||||
resp = EntityUtils.toString(entity, "UTF-8");
|
||||
EntityUtils.consume(entity);
|
||||
} finally {
|
||||
response.close();
|
||||
}
|
||||
logger.info("Ding Talk send title :{},content : {}, resp: {}", title, content, resp);
|
||||
return resp;
|
||||
} finally {
|
||||
httpClient.close();
|
||||
}
|
||||
}
|
||||
|
||||
public static class DingTalkSendMsgResponse {
|
||||
private Integer errcode;
|
||||
private String errmsg;
|
||||
|
||||
public Integer getErrcode() {
|
||||
return errcode;
|
||||
}
|
||||
|
||||
public void setErrcode(Integer errcode) {
|
||||
this.errcode = errcode;
|
||||
}
|
||||
|
||||
public String getErrmsg() {
|
||||
return errmsg;
|
||||
}
|
||||
|
||||
public void setErrmsg(String errmsg) {
|
||||
this.errmsg = errmsg;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
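For reference, DingTalkSendMsgResponse above simply mirrors the errcode/errmsg fields of the DingTalk robot reply that checkSendDingTalkSendMsgResult inspects. A standalone sketch of that mapping, using an illustrative reply body rather than a captured response:

package org.apache.dolphinscheduler.plugin.alert.dingtalk;

import org.apache.dolphinscheduler.spi.utils.JSONUtils;

public class DingTalkResponseMappingSketch {

    public static void main(String[] args) {
        // Illustrative robot reply; the field names match what the response class expects.
        String sampleResp = "{\"errcode\":0,\"errmsg\":\"ok\"}";

        DingTalkSender.DingTalkSendMsgResponse response =
                JSONUtils.parseObject(sampleResp, DingTalkSender.DingTalkSendMsgResponse.class);

        System.out.println(response.getErrcode() + " -> " + response.getErrmsg());
    }
}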
@ -50,7 +50,7 @@ public class DingTalkSenderTest {
dingTalkSender.sendDingTalkMsg("keyWord+Welcome", "UTF-8");
dingTalkConfig.put(DingTalkParamsConstants.NAME_DING_TALK_PROXY_ENABLE, "true");
dingTalkSender = new DingTalkSender(dingTalkConfig);
AlertResult alertResult = dingTalkSender.sendDingTalkMsg("keyWord+Welcome", "UTF-8");
AlertResult alertResult = dingTalkSender.sendDingTalkMsg("title", "content test");
Assert.assertEquals("false",alertResult.getStatus());
}

@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;

import java.util.Map;

@ -38,8 +37,10 @@ public class EmailAlertChannel implements AlertChannel {
public AlertResult process(AlertInfo info) {

AlertData alert = info.getAlertData();
String alertParams = info.getAlertParams();
Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
Map<String, String> paramsMap = info.getAlertParams();
if (null == paramsMap) {
return new AlertResult("false", "mail params is null");
}
MailSender mailSender = new MailSender(paramsMap);
AlertResult alertResult = mailSender.sendMails(alert.getTitle(), alert.getContent());

@ -33,33 +33,33 @@ public class MailParamsConstants {
|
||||
public static final String NAME_PLUGIN_DEFAULT_EMAIL_RECEIVERCCS = "receiverCcs";
|
||||
|
||||
public static final String MAIL_PROTOCOL = "transport.protocol";
|
||||
public static final String NAME_MAIL_PROTOCOL = "protocol";
|
||||
public static final String NAME_MAIL_PROTOCOL = "mail.protocol";
|
||||
|
||||
public static final String MAIL_SMTP_HOST = "smtp.host";
|
||||
public static final String MAIL_SMTP_HOST = "mail.smtp.host";
|
||||
public static final String NAME_MAIL_SMTP_HOST = "serverHost";
|
||||
|
||||
public static final String MAIL_SMTP_PORT = "smtp.port";
|
||||
public static final String MAIL_SMTP_PORT = "mail.smtp.port";
|
||||
public static final String NAME_MAIL_SMTP_PORT = "serverPort";
|
||||
|
||||
public static final String MAIL_SENDER = "sender";
|
||||
public static final String MAIL_SENDER = "mail.sender";
|
||||
public static final String NAME_MAIL_SENDER = "sender";
|
||||
|
||||
public static final String MAIL_SMTP_AUTH = "smtp.auth";
|
||||
public static final String MAIL_SMTP_AUTH = "mail.smtp.auth";
|
||||
public static final String NAME_MAIL_SMTP_AUTH = "enableSmtpAuth";
|
||||
|
||||
public static final String MAIL_USER = "user";
|
||||
public static final String MAIL_USER = "mail.user";
|
||||
public static final String NAME_MAIL_USER = "user";
|
||||
|
||||
public static final String MAIL_PASSWD = "passwd";
|
||||
public static final String MAIL_PASSWD = "mail.passwd";
|
||||
public static final String NAME_MAIL_PASSWD = "passwd";
|
||||
|
||||
public static final String MAIL_SMTP_STARTTLS_ENABLE = "smtp.starttls.enable";
|
||||
public static final String MAIL_SMTP_STARTTLS_ENABLE = "mail.smtp.starttls.enable";
|
||||
public static final String NAME_MAIL_SMTP_STARTTLS_ENABLE = "starttlsEnable";
|
||||
|
||||
public static final String MAIL_SMTP_SSL_ENABLE = "smtp.ssl.enable";
|
||||
public static final String MAIL_SMTP_SSL_ENABLE = "mail.smtp.ssl.enable";
|
||||
public static final String NAME_MAIL_SMTP_SSL_ENABLE = "sslEnable";
|
||||
|
||||
public static final String MAIL_SMTP_SSL_TRUST = "smtp.ssl.trust";
|
||||
public static final String MAIL_SMTP_SSL_TRUST = "mail.smtp.ssl.trust";
|
||||
public static final String NAME_MAIL_SMTP_SSL_TRUST = "smtpSslTrust";
|
||||
|
||||
}
|
||||
|
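The NAME_* constants above are the keys the email plugin reads its settings from, as the updated MailUtilsTest further below also shows. A hedged usage sketch (MailSender is assumed to live in the same plugin package; the host, port, sender and auth values are placeholders, and a working setup would also need the receiver fields and the remaining options listed above):

package org.apache.dolphinscheduler.plugin.alert.email;

import org.apache.dolphinscheduler.spi.alert.AlertResult;

import java.util.HashMap;
import java.util.Map;

public class MailSenderUsageSketch {

    public static void main(String[] args) {
        Map<String, String> emailConfig = new HashMap<>();
        emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_HOST, "smtp.example.com"); // placeholder host
        emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_PORT, "25");
        emailConfig.put(MailParamsConstants.NAME_MAIL_SENDER, "noreply@example.com");
        emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_AUTH, "false");

        AlertResult result = new MailSender(emailConfig).sendMails("demo title", "demo content");
        System.out.println(result.getStatus());
    }
}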
@ -19,13 +19,10 @@ package org.apache.dolphinscheduler.plugin.alert.email;
|
||||
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertChannel;
|
||||
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
|
||||
import org.apache.dolphinscheduler.spi.utils.JSONUtils;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
@ -36,29 +33,13 @@ import org.junit.Test;
|
||||
*/
|
||||
public class EmailAlertChannelFactoryTest {
|
||||
|
||||
@Before
|
||||
public void before() throws Exception {
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() throws Exception {
|
||||
}
|
||||
|
||||
/**
|
||||
* Method: getName()
|
||||
*/
|
||||
@Test
|
||||
public void testGetName() throws Exception {
|
||||
}
|
||||
|
||||
/**
|
||||
* Method: getParams()
|
||||
*/
|
||||
@Test
|
||||
public void testGetParams() throws Exception {
|
||||
public void testGetParams() {
|
||||
EmailAlertChannelFactory emailAlertChannelFactory = new EmailAlertChannelFactory();
|
||||
List<PluginParams> params = emailAlertChannelFactory.getParams();
|
||||
System.out.println(JSONUtils.toJsonString(params));
|
||||
Assert.assertEquals(12, params.size());
|
||||
}
|
||||
|
||||
@ -66,7 +47,7 @@ public class EmailAlertChannelFactoryTest {
|
||||
* Method: create()
|
||||
*/
|
||||
@Test
|
||||
public void testCreate() throws Exception {
|
||||
public void testCreate() {
|
||||
EmailAlertChannelFactory emailAlertChannelFactory = new EmailAlertChannelFactory();
|
||||
AlertChannel alertChannel = emailAlertChannelFactory.create();
|
||||
Assert.assertNotNull(alertChannel);
|
||||
|
@ -24,6 +24,7 @@ import org.apache.dolphinscheduler.spi.alert.AlertResult;
|
||||
import org.apache.dolphinscheduler.spi.alert.ShowType;
|
||||
import org.apache.dolphinscheduler.spi.params.InputParam;
|
||||
import org.apache.dolphinscheduler.spi.params.PasswordParam;
|
||||
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
|
||||
import org.apache.dolphinscheduler.spi.params.RadioParam;
|
||||
import org.apache.dolphinscheduler.spi.params.base.DataType;
|
||||
import org.apache.dolphinscheduler.spi.params.base.ParamsOptions;
|
||||
@ -34,6 +35,7 @@ import org.apache.dolphinscheduler.spi.utils.JSONUtils;
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
@ -66,7 +68,9 @@ public class EmailAlertChannelTest {
|
||||
.setTitle("test");
|
||||
AlertInfo alertInfo = new AlertInfo();
|
||||
alertInfo.setAlertData(alertData);
|
||||
alertInfo.setAlertParams(getEmailAlertParams());
|
||||
Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(getEmailAlertParams());
|
||||
|
||||
alertInfo.setAlertParams(paramsMap);
|
||||
AlertResult alertResult = emailAlertChannel.process(alertInfo);
|
||||
Assert.assertNotNull(alertResult);
|
||||
Assert.assertEquals("false", alertResult.getStatus());
|
||||
|
@ -53,6 +53,7 @@ public class MailUtilsTest {
|
||||
emailConfig.put(MailParamsConstants.NAME_MAIL_SENDER, "xxx1.xxx.com");
|
||||
emailConfig.put(MailParamsConstants.NAME_MAIL_USER, "xxx2.xxx.com");
|
||||
emailConfig.put(MailParamsConstants.NAME_MAIL_PASSWD, "111111");
|
||||
emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_AUTH, "true");
|
||||
emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_STARTTLS_ENABLE, "true");
|
||||
emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_SSL_ENABLE, "false");
|
||||
emailConfig.put(MailParamsConstants.NAME_MAIL_SMTP_SSL_TRUST, "false");
|
||||
|
@ -0,0 +1,82 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<parent>
|
||||
<artifactId>dolphinscheduler-alert-plugin</artifactId>
|
||||
<groupId>org.apache.dolphinscheduler</groupId>
|
||||
<version>1.3.4-SNAPSHOT</version>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<groupId>org.apache.dolphinscheduler</groupId>
|
||||
<artifactId>dolphinscheduler-alert-feishu</artifactId>
|
||||
<packaging>dolphinscheduler-plugin</packaging>
|
||||
|
||||
<dependencies>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.dolphinscheduler</groupId>
|
||||
<artifactId>dolphinscheduler-spi</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.httpcomponents</groupId>
|
||||
<artifactId>httpclient</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>ch.qos.logback</groupId>
|
||||
<artifactId>logback-classic</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-api</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.fasterxml.jackson.core</groupId>
|
||||
<artifactId>jackson-annotations</artifactId>
|
||||
<scope>provided</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.mockito</groupId>
|
||||
<artifactId>mockito-core</artifactId>
|
||||
<type>jar</type>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<finalName>dolphinscheduler-alert-feishu-${project.version}</finalName>
|
||||
</build>
|
||||
|
||||
</project>
|
@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.plugin.alert.feishu;
|
||||
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertChannel;
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertData;
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertResult;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
public class FeiShuAlertChannel implements AlertChannel {
|
||||
@Override
|
||||
public AlertResult process(AlertInfo alertInfo) {
|
||||
|
||||
AlertData alertData = alertInfo.getAlertData();
|
||||
Map<String, String> paramsMap = alertInfo.getAlertParams();
|
||||
if (null == paramsMap) {
|
||||
return new AlertResult("false", "fei shu params is null");
|
||||
}
|
||||
return new FeiShuSender(paramsMap).sendFeiShuMsg(alertData);
|
||||
}
|
||||
}
|
@ -0,0 +1,82 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.plugin.alert.feishu;
|
||||
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertChannel;
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory;
|
||||
import org.apache.dolphinscheduler.spi.params.InputParam;
|
||||
import org.apache.dolphinscheduler.spi.params.PasswordParam;
|
||||
import org.apache.dolphinscheduler.spi.params.RadioParam;
|
||||
import org.apache.dolphinscheduler.spi.params.base.ParamsOptions;
|
||||
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
|
||||
import org.apache.dolphinscheduler.spi.params.base.Validate;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
public class FeiShuAlertChannelFactory implements AlertChannelFactory {
|
||||
@Override
|
||||
public String getName() {
|
||||
return "Feishu";
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<PluginParams> getParams() {
|
||||
InputParam webHookParam = InputParam.newBuilder(FeiShuParamsConstants.NAME_WEB_HOOK, FeiShuParamsConstants.WEB_HOOK)
|
||||
.addValidate(Validate.newBuilder()
|
||||
.setRequired(true)
|
||||
.build())
|
||||
.build();
|
||||
RadioParam isEnableProxy =
|
||||
RadioParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE, FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE)
|
||||
.addParamsOptions(new ParamsOptions("YES", true, false))
|
||||
.addParamsOptions(new ParamsOptions("NO", false, false))
|
||||
.setValue(true)
|
||||
.addValidate(Validate.newBuilder()
|
||||
.setRequired(false)
|
||||
.build())
|
||||
.build();
|
||||
InputParam proxyParam =
|
||||
InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PROXY, FeiShuParamsConstants.FEI_SHU_PROXY)
|
||||
.addValidate(Validate.newBuilder()
|
||||
.setRequired(false).build())
|
||||
.build();
|
||||
|
||||
InputParam portParam = InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PORT, FeiShuParamsConstants.FEI_SHU_PORT)
|
||||
.addValidate(Validate.newBuilder()
|
||||
.setRequired(false).build())
|
||||
.build();
|
||||
|
||||
InputParam userParam =
|
||||
InputParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_USER, FeiShuParamsConstants.FEI_SHU_USER)
|
||||
.addValidate(Validate.newBuilder()
|
||||
.setRequired(false).build())
|
||||
.build();
|
||||
PasswordParam passwordParam = PasswordParam.newBuilder(FeiShuParamsConstants.NAME_FEI_SHU_PASSWORD, FeiShuParamsConstants.FEI_SHU_PASSWORD)
|
||||
.setPlaceholder("if enable use authentication, you need input password")
|
||||
.build();
|
||||
|
||||
return Arrays.asList(webHookParam, isEnableProxy, proxyParam, portParam, userParam, passwordParam);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public AlertChannel create() {
|
||||
return new FeiShuAlertChannel();
|
||||
}
|
||||
}
|
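A short sketch of how this factory is typically exercised: getParams() yields the six form fields that get serialized to JSON (the new FeiShuAlertChannelFactoryTest later in this diff asserts the count), and create() returns the runtime channel. That the JSON ends up rendered by the alert-group UI is my reading, not something stated in the diff.

package org.apache.dolphinscheduler.plugin.alert.feishu;

import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
import org.apache.dolphinscheduler.spi.utils.JSONUtils;

import java.util.List;

public class FeiShuFactorySketch {

    public static void main(String[] args) {
        FeiShuAlertChannelFactory factory = new FeiShuAlertChannelFactory();

        // Webhook, proxy switch, proxy host, port, user and password form definitions.
        List<PluginParams> params = factory.getParams();
        System.out.println(JSONUtils.toJsonString(params));

        // The same factory is the entry point for creating the runtime channel.
        AlertChannel channel = factory.create();
        System.out.println(factory.getName() + " -> " + channel.getClass().getSimpleName());
    }
}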
@ -15,26 +15,16 @@
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.alert.manager;
|
||||
package org.apache.dolphinscheduler.plugin.alert.feishu;
|
||||
|
||||
import org.apache.dolphinscheduler.dao.entity.Alert;
|
||||
import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
|
||||
/**
|
||||
* SMS send manager
|
||||
*/
|
||||
public class MsgManager {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(MsgManager.class);
|
||||
|
||||
/**
|
||||
* SMS send
|
||||
*
|
||||
* @param alert the alert
|
||||
*/
|
||||
public void send(Alert alert) {
|
||||
logger.info("send message {}", alert);
|
||||
public class FeiShuAlertPlugin implements DolphinSchedulerPlugin {
|
||||
@Override
|
||||
public Iterable<AlertChannelFactory> getAlertChannelFactorys() {
|
||||
return ImmutableList.of(new FeiShuAlertChannelFactory());
|
||||
}
|
||||
}
|
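FeiShuAlertPlugin is the SPI entry point. The sketch below assumes the alert server simply iterates getAlertChannelFactorys() and registers one channel per factory name; that hosting behaviour is not shown in this diff.

package org.apache.dolphinscheduler.plugin.alert.feishu;

import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin;
import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory;

public class FeiShuPluginDiscoverySketch {

    public static void main(String[] args) {
        DolphinSchedulerPlugin plugin = new FeiShuAlertPlugin();

        // Assumed host-side loop: one alert channel registered per exposed factory.
        for (AlertChannelFactory factory : plugin.getAlertChannelFactorys()) {
            System.out.println("register alert channel: " + factory.getName());
        }
    }
}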
@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.plugin.alert.feishu;
|
||||
|
||||
public class FeiShuParamsConstants {
|
||||
|
||||
private FeiShuParamsConstants() {
|
||||
throw new IllegalStateException("Utility class");
|
||||
}
|
||||
|
||||
static final String WEB_HOOK = "webhook";
|
||||
|
||||
static final String NAME_WEB_HOOK = "webHook";
|
||||
|
||||
public static final String FEI_SHU_PROXY_ENABLE = "isEnableProxy";
|
||||
|
||||
static final String NAME_FEI_SHU_PROXY_ENABLE = "isEnableProxy";
|
||||
|
||||
static final String FEI_SHU_PROXY = "proxy";
|
||||
|
||||
static final String NAME_FEI_SHU_PROXY = "proxy";
|
||||
|
||||
static final String FEI_SHU_PORT = "port";
|
||||
|
||||
static final String NAME_FEI_SHU_PORT = "port";
|
||||
|
||||
static final String FEI_SHU_USER = "user";
|
||||
|
||||
static final String NAME_FEI_SHU_USER = "user";
|
||||
|
||||
static final String FEI_SHU_PASSWORD = "password";
|
||||
|
||||
static final String NAME_FEI_SHU_PASSWORD = "password";
|
||||
}
|
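Since the FeiShuSender constructor in the next file reads its settings by these NAME_* keys, a minimal end-to-end sketch looks like the following; the webhook URL is a placeholder and the proxy stays disabled.

package org.apache.dolphinscheduler.plugin.alert.feishu;

import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertResult;

import java.util.HashMap;
import java.util.Map;

public class FeiShuSenderUsageSketch {

    public static void main(String[] args) {
        // NAME_* constants double as the config map keys the sender reads.
        Map<String, String> config = new HashMap<>();
        config.put(FeiShuParamsConstants.NAME_WEB_HOOK, "https://open.feishu.cn/open-apis/bot/v2/hook/placeholder");
        config.put(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE, "false");

        AlertData alertData = new AlertData();
        alertData.setTitle("demo title");
        alertData.setContent("demo content");

        AlertResult result = new FeiShuSender(config).sendFeiShuMsg(alertData);
        System.out.println(result.getStatus() + " : " + result.getMessage());
    }
}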
@ -0,0 +1,214 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.plugin.alert.feishu;
|
||||
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertData;
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertResult;
|
||||
import org.apache.dolphinscheduler.spi.utils.JSONUtils;
|
||||
|
||||
import org.apache.commons.codec.binary.StringUtils;
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpStatus;
|
||||
import org.apache.http.client.methods.CloseableHttpResponse;
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.impl.client.CloseableHttpClient;
|
||||
import org.apache.http.util.EntityUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
public class FeiShuSender {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(FeiShuSender.class);
|
||||
|
||||
private String url;
|
||||
|
||||
private Boolean enableProxy;
|
||||
|
||||
private String proxy;
|
||||
|
||||
private Integer port;
|
||||
|
||||
private String user;
|
||||
|
||||
private String password;
|
||||
|
||||
FeiShuSender(Map<String, String> config) {
|
||||
url = config.get(FeiShuParamsConstants.NAME_WEB_HOOK);
|
||||
enableProxy = Boolean.valueOf(config.get(FeiShuParamsConstants.NAME_FEI_SHU_PROXY_ENABLE));
|
||||
if (Boolean.TRUE.equals(enableProxy)) {
|
||||
port = Integer.parseInt(config.get(FeiShuParamsConstants.NAME_FEI_SHU_PORT));
|
||||
proxy = config.get(FeiShuParamsConstants.NAME_FEI_SHU_PROXY);
|
||||
user = config.get(FeiShuParamsConstants.NAME_FEI_SHU_USER);
|
||||
password = config.get(FeiShuParamsConstants.NAME_FEI_SHU_PASSWORD);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static String textToJsonString(AlertData alertData) {
|
||||
|
||||
Map<String, Object> items = new HashMap<>(2);
|
||||
items.put("msg_type", "text");
|
||||
Map<String, String> textContent = new HashMap<>();
|
||||
byte[] byt = StringUtils.getBytesUtf8(formatContent(alertData));
|
||||
String txt = StringUtils.newStringUtf8(byt);
|
||||
textContent.put("text", txt);
|
||||
items.put("content", textContent);
|
||||
return JSONUtils.toJsonString(items);
|
||||
}
|
||||
|
||||
public static AlertResult checkSendFeiShuSendMsgResult(String result) {
|
||||
AlertResult alertResult = new AlertResult();
|
||||
alertResult.setStatus("false");
|
||||
|
||||
if (org.apache.dolphinscheduler.spi.utils.StringUtils.isBlank(result)) {
|
||||
alertResult.setMessage("send fei shu msg error");
|
||||
logger.info("send fei shu msg error,fei shu server resp is null");
|
||||
return alertResult;
|
||||
}
|
||||
FeiShuSendMsgResponse sendMsgResponse = JSONUtils.parseObject(result, FeiShuSendMsgResponse.class);
|
||||
|
||||
if (null == sendMsgResponse) {
|
||||
alertResult.setMessage("send fei shu msg fail");
|
||||
logger.info("send fei shu msg error,resp error");
|
||||
return alertResult;
|
||||
}
|
||||
if (sendMsgResponse.statusCode == 0) {
|
||||
alertResult.setStatus("true");
|
||||
alertResult.setMessage("send fei shu msg success");
|
||||
return alertResult;
|
||||
}
|
||||
alertResult.setMessage(String.format("alert send fei shu msg error : %s", sendMsgResponse.getStatusMessage()));
|
||||
logger.info("alert send fei shu msg error : {} ,Extra : {} ", sendMsgResponse.getStatusMessage(), sendMsgResponse.getExtra());
|
||||
return alertResult;
|
||||
}
|
||||
|
||||
public static String formatContent(AlertData alertData) {
|
||||
if (alertData.getContent() != null) {
|
||||
|
||||
List<Map> list = JSONUtils.toList(alertData.getContent(), Map.class);
|
||||
if (list.isEmpty()) {
|
||||
return alertData.getTitle() + alertData.getContent();
|
||||
}
|
||||
|
||||
StringBuilder contents = new StringBuilder(100);
|
||||
contents.append(String.format("`%s`%n", alertData.getTitle()));
|
||||
for (Map map : list) {
|
||||
Iterator<Entry<String, Object>> entries = map.entrySet().iterator();
|
||||
while (entries.hasNext()) {
|
||||
Entry<String, Object> entry = entries.next();
|
||||
String key = entry.getKey();
|
||||
String value = entry.getValue().toString();
|
||||
contents.append(key + ":" + value);
|
||||
contents.append("\n");
|
||||
}
|
||||
}
|
||||
return contents.toString();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public AlertResult sendFeiShuMsg(AlertData alertData) {
|
||||
AlertResult alertResult;
|
||||
try {
|
||||
String resp = sendMsg(alertData);
|
||||
return checkSendFeiShuSendMsgResult(resp);
|
||||
} catch (Exception e) {
|
||||
logger.info("send fei shu alert msg exception : {}", e.getMessage());
|
||||
alertResult = new AlertResult();
|
||||
alertResult.setStatus("false");
|
||||
alertResult.setMessage("send fei shu alert fail.");
|
||||
}
|
||||
return alertResult;
|
||||
}
|
||||
|
||||
private String sendMsg(AlertData alertData) throws IOException {
|
||||
|
||||
String msgToJson = textToJsonString(alertData);
|
||||
|
||||
HttpPost httpPost = HttpRequestUtil.constructHttpPost(url, msgToJson);
|
||||
|
||||
CloseableHttpClient httpClient;
|
||||
|
||||
httpClient = HttpRequestUtil.getHttpClient(enableProxy, proxy, port, user, password);
|
||||
|
||||
try {
|
||||
CloseableHttpResponse response = httpClient.execute(httpPost);
|
||||
|
||||
int statusCode = response.getStatusLine().getStatusCode();
|
||||
if (statusCode != HttpStatus.SC_OK) {
|
||||
logger.error("send feishu message error, return http status code: {} ", statusCode);
|
||||
}
|
||||
String resp;
|
||||
try {
|
||||
HttpEntity entity = response.getEntity();
|
||||
resp = EntityUtils.toString(entity, "utf-8");
|
||||
EntityUtils.consume(entity);
|
||||
} finally {
|
||||
response.close();
|
||||
}
|
||||
logger.info("Fei Shu send title :{} ,content :{}, resp: {}", alertData.getTitle(), alertData.getContent(), resp);
|
||||
return resp;
|
||||
} finally {
|
||||
httpClient.close();
|
||||
}
|
||||
}
|
||||
|
||||
public static class FeiShuSendMsgResponse {
|
||||
@JsonProperty("Extra")
|
||||
private String extra;
|
||||
@JsonProperty("StatusCode")
|
||||
private Integer statusCode;
|
||||
@JsonProperty("StatusMessage")
|
||||
private String statusMessage;
|
||||
|
||||
public String getExtra() {
|
||||
return extra;
|
||||
}
|
||||
|
||||
public void setExtra(String extra) {
|
||||
this.extra = extra;
|
||||
}
|
||||
|
||||
public Integer getStatusCode() {
|
||||
return statusCode;
|
||||
}
|
||||
|
||||
public void setStatusCode(Integer statusCode) {
|
||||
this.statusCode = statusCode;
|
||||
}
|
||||
|
||||
public String getStatusMessage() {
|
||||
return statusMessage;
|
||||
}
|
||||
|
||||
public void setStatusMessage(String statusMessage) {
|
||||
this.statusMessage = statusMessage;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
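A quick sketch of what formatContent above does with an alert payload: it expects the content to be a JSON array of key/value maps and renders a back-ticked title line followed by one key:value line per entry, falling back to title plus raw content for anything it cannot parse as such a list. The sample payload is illustrative.

package org.apache.dolphinscheduler.plugin.alert.feishu;

import org.apache.dolphinscheduler.spi.alert.AlertData;

public class FeiShuFormatContentSketch {

    public static void main(String[] args) {
        AlertData alertData = new AlertData();
        alertData.setTitle("process alert");
        // Illustrative content in the JSON-array-of-maps shape formatContent expects.
        alertData.setContent("[{\"processName\":\"demo-process\",\"processState\":\"FAILURE\"}]");

        // Prints `process alert` on the first line, then processName/processState as key:value lines.
        System.out.println(FeiShuSender.formatContent(alertData));
    }
}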
@ -0,0 +1,50 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.plugin.alert.feishu;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.auth.AuthScope;
|
||||
import org.apache.http.auth.UsernamePasswordCredentials;
|
||||
import org.apache.http.client.CredentialsProvider;
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.http.impl.client.BasicCredentialsProvider;
|
||||
import org.apache.http.impl.client.CloseableHttpClient;
|
||||
import org.apache.http.impl.client.HttpClients;
|
||||
|
||||
public class HttpRequestUtil {
|
||||
|
||||
public static CloseableHttpClient getHttpClient(boolean enableProxy, String proxy, Integer port, String user, String password) {
|
||||
if (enableProxy) {
|
||||
HttpHost httpProxy = new HttpHost(proxy, port);
|
||||
CredentialsProvider provider = new BasicCredentialsProvider();
|
||||
provider.setCredentials(new AuthScope(httpProxy), new UsernamePasswordCredentials(user, password));
|
||||
return HttpClients.custom().setDefaultCredentialsProvider(provider).build();
|
||||
} else {
|
||||
return HttpClients.createDefault();
|
||||
}
|
||||
}
|
||||
|
||||
public static HttpPost constructHttpPost(String url, String msg) {
|
||||
HttpPost post = new HttpPost(url);
|
||||
StringEntity entity = new StringEntity(msg, ContentType.APPLICATION_JSON);
|
||||
post.setEntity(entity);
|
||||
return post;
|
||||
}
|
||||
}
|
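HttpRequestUtil is shared plumbing for the sender above. A usage sketch with placeholder URL, payload and proxy values; note that when the proxy flag is set the helper only registers credentials on the client, it does not put a proxy route on the request itself.

package org.apache.dolphinscheduler.plugin.alert.feishu;

import org.apache.http.client.methods.HttpPost;
import org.apache.http.impl.client.CloseableHttpClient;

import java.io.IOException;

public class HttpRequestUtilUsageSketch {

    public static void main(String[] args) throws IOException {
        // Without a proxy the helper simply returns a default client.
        try (CloseableHttpClient client = HttpRequestUtil.getHttpClient(false, null, null, null, null)) {
            HttpPost post = HttpRequestUtil.constructHttpPost(
                    "https://open.feishu.cn/open-apis/bot/v2/hook/placeholder", // placeholder webhook
                    "{\"msg_type\":\"text\",\"content\":{\"text\":\"hello\"}}");
            client.execute(post).close();
        }

        // With the proxy flag enabled the host/port/user/password feed a BasicCredentialsProvider;
        // the values here are purely illustrative.
        CloseableHttpClient proxied = HttpRequestUtil.getHttpClient(true, "proxy.example.com", 3128, "user", "secret");
        proxied.close();
    }
}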
@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.plugin.alert.feishu;
|
||||
|
||||
import org.apache.dolphinscheduler.spi.alert.AlertChannel;
|
||||
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
|
||||
import org.apache.dolphinscheduler.spi.utils.JSONUtils;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
public class FeiShuAlertChannelFactoryTest {
|
||||
|
||||
@Test
|
||||
public void testGetParams() {
|
||||
FeiShuAlertChannelFactory feiShuAlertChannelFactory = new FeiShuAlertChannelFactory();
|
||||
List<PluginParams> params = feiShuAlertChannelFactory.getParams();
|
||||
JSONUtils.toJsonString(params);
|
||||
Assert.assertEquals(6, params.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCreate() {
|
||||
FeiShuAlertChannelFactory feiShuAlertChannelFactory = new FeiShuAlertChannelFactory();
|
||||
AlertChannel alertChannel = feiShuAlertChannelFactory.create();
|
||||
Assert.assertNotNull(alertChannel);
|
||||
}
|
||||
}
|
@ -0,0 +1,99 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.plugin.alert.feishu;

import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertResult;

import java.util.HashMap;
import java.util.Map;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class FeiShuSenderTest {


    private static Map<String, String> feiShuConfig = new HashMap<>();

    @Before
    public void initFeiShuConfig() {
        feiShuConfig.put(FeiShuParamsConstants.WEB_HOOK, "https://open.feishu.cn/open-apis/bot/v2/hook/xxxxx");
    }

    @Test
    public void testSend() {
        AlertData alertData = new AlertData();
        alertData.setTitle("feishu test title");
        alertData.setContent("feishu test content");
        FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig);
        AlertResult alertResult = feiShuSender.sendFeiShuMsg(alertData);
        Assert.assertEquals("false", alertResult.getStatus());
    }

    @Test
    public void testFormatContent() {
        String alertMsg = "[\n"
            + " {\n"
            + " \"owner\": \"dolphinscheduler\",\n"
            + " \"processEndTime\": \"2021-01-29 19:01:11\",\n"
            + " \"processHost\": \"10.81.129.4:5678\",\n"
            + " \"processId\": 2926,\n"
            + " \"processName\": \"3-20210129190038108\",\n"
            + " \"processStartTime\": \"2021-01-29 19:00:38\",\n"
            + " \"processState\": \"SUCCESS\",\n"
            + " \"processType\": \"START_PROCESS\",\n"
            + " \"projectId\": 2,\n"
            + " \"projectName\": \"testdelproject\",\n"
            + " \"recovery\": \"NO\",\n"
            + " \"retryTimes\": 0,\n"
            + " \"runTimes\": 1,\n"
            + " \"taskId\": 0\n"
            + " }\n"
            + "]";
        AlertData alertData = new AlertData();
        alertData.setTitle("");
        alertData.setContent(alertMsg);
        Assert.assertNotNull(FeiShuSender.formatContent(alertData));
    }

    @Test
    public void testSendWithFormatException() {
        AlertData alertData = new AlertData();
        alertData.setTitle("feishu test title");
        alertData.setContent("feishu test content");
        FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig);
        String alertResult = feiShuSender.formatContent(alertData);
        Assert.assertEquals(alertResult, alertData.getTitle() + alertData.getContent());
    }

    @Test
    public void testCheckSendFeiShuSendMsgResult() {

        FeiShuSender feiShuSender = new FeiShuSender(feiShuConfig);
        AlertResult alertResult = feiShuSender.checkSendFeiShuSendMsgResult("");
        Assert.assertFalse(Boolean.valueOf(alertResult.getStatus()));
        AlertResult alertResult2 = feiShuSender.checkSendFeiShuSendMsgResult("123");
        Assert.assertEquals("send fei shu msg fail",alertResult2.getMessage());

        String response = "{\"StatusCode\":\"0\",\"extra\":\"extra\",\"StatusMessage\":\"StatusMessage\"}";
        AlertResult alertResult3 = feiShuSender.checkSendFeiShuSendMsgResult(response);
        Assert.assertTrue(Boolean.valueOf(alertResult3.getStatus()));
    }
}
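Editor's note on the FeiShu test above: checkSendFeiShuSendMsgResult treats an empty or unparsable reply as a failed send and only a body whose StatusCode field is "0" as success. A minimal, self-contained sketch of that check, assuming plain Jackson rather than the plugin's own HTTP/JSON helpers (class and method names here are illustrative, not the plugin's actual code):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class FeiShuResponseCheckSketch {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Success only when the webhook answered with StatusCode "0".
    public static boolean isSendSuccess(String responseBody) {
        if (responseBody == null || responseBody.isEmpty()) {
            return false; // mirrors the "" case in the test: empty reply means failure
        }
        try {
            JsonNode node = MAPPER.readTree(responseBody);
            // "123" parses as a bare number, has no StatusCode field, and therefore fails here
            return "0".equals(node.path("StatusCode").asText());
        } catch (Exception e) {
            return false; // unparsable reply is also treated as a failed send
        }
    }

    public static void main(String[] args) {
        System.out.println(isSendSuccess("{\"StatusCode\":\"0\",\"StatusMessage\":\"StatusMessage\"}")); // true
        System.out.println(isSendSuccess("123")); // false
    }
}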
@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;

import java.util.Map;

@ -33,8 +32,10 @@ public class HttpAlertChannel implements AlertChannel {
public AlertResult process(AlertInfo alertInfo) {

AlertData alertData = alertInfo.getAlertData();
String alertParams = alertInfo.getAlertParams();
Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
Map<String, String> paramsMap = alertInfo.getAlertParams();
if (null == paramsMap) {
return new AlertResult("false", "http params is null");
}

return new HttpSender(paramsMap).send(alertData.getContent());
}
@ -21,12 +21,14 @@ import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
import org.apache.dolphinscheduler.spi.params.InputParam;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
import org.apache.dolphinscheduler.spi.params.base.Validate;
import org.apache.dolphinscheduler.spi.utils.JSONUtils;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.junit.Assert;
import org.junit.Test;
@ -45,7 +47,7 @@ public class HttpAlertChannelTest {
alertData.setContent("Fault tolerance warning");
alertInfo.setAlertData(alertData);
AlertResult alertResult = alertChannel.process(alertInfo);
Assert.assertEquals("Request types are not supported", alertResult.getMessage());
Assert.assertEquals("http params is null", alertResult.getMessage());
}

@Test
@ -56,7 +58,8 @@ public class HttpAlertChannelTest {
AlertData alertData = new AlertData();
alertData.setContent("Fault tolerance warning");
alertInfo.setAlertData(alertData);
alertInfo.setAlertParams(getParams());
Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(getParams());
alertInfo.setAlertParams(paramsMap);
AlertResult alertResult = alertChannel.process(alertInfo);
Assert.assertEquals("true", alertResult.getStatus());
}
@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;

import java.util.Map;

@ -33,8 +32,10 @@ public class ScriptAlertChannel implements AlertChannel {
@Override
public AlertResult process(AlertInfo alertinfo) {
AlertData alertData = alertinfo.getAlertData();
String alertParams = alertinfo.getAlertParams();
Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
Map<String, String> paramsMap = alertinfo.getAlertParams();
if (null == paramsMap) {
return new AlertResult("false", "ding talk params is null");
}
return new ScriptSender(paramsMap).sendScriptAlert(alertData.getTitle());
}
}
@ -19,7 +19,6 @@ package org.apache.dolphinscheduler.plugin.alert.script;

import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
import org.apache.dolphinscheduler.spi.utils.JSONUtils;

import java.util.List;

@ -35,7 +34,6 @@ public class ScriptAlertChannelFactoryTest {
public void testGetParams() {
ScriptAlertChannelFactory scriptAlertChannelFactory = new ScriptAlertChannelFactory();
List<PluginParams> params = scriptAlertChannelFactory.getParams();
JSONUtils.toJsonString(params);
Assert.assertEquals(3, params.size());
}

@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.spi.alert.AlertChannel;
import org.apache.dolphinscheduler.spi.alert.AlertData;
import org.apache.dolphinscheduler.spi.alert.AlertInfo;
import org.apache.dolphinscheduler.spi.alert.AlertResult;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;

import java.util.Map;

@ -33,8 +32,10 @@ public class WeChatAlertChannel implements AlertChannel {
@Override
public AlertResult process(AlertInfo info) {
AlertData alertData = info.getAlertData();
String alertParams = info.getAlertParams();
Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(alertParams);
Map<String, String> paramsMap = info.getAlertParams();
if (null == paramsMap) {
return new AlertResult("false", "we chat params is null");
}
return new WeChatSender(paramsMap).sendEnterpriseWeChat(alertData.getTitle(), alertData.getContent());

}
@ -35,6 +35,7 @@
<module>dolphinscheduler-alert-dingtalk</module>
<module>dolphinscheduler-alert-script</module>
<module>dolphinscheduler-alert-http</module>
<module>dolphinscheduler-alert-feishu</module>
</modules>

@ -29,7 +29,6 @@ import org.apache.dolphinscheduler.alert.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.DaoFactory;
import org.apache.dolphinscheduler.dao.PluginDao;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.CommandType;
@ -53,8 +52,6 @@ public class AlertServer {
*/
private AlertDao alertDao = DaoFactory.getDaoInstance(AlertDao.class);

private PluginDao pluginDao = DaoFactory.getDaoInstance(PluginDao.class);

private AlertSender alertSender;

private static AlertServer instance;
@ -114,7 +111,7 @@ public class AlertServer {
NettyServerConfig serverConfig = new NettyServerConfig();
serverConfig.setListenPort(ALERT_RPC_PORT);
this.server = new NettyRemotingServer(serverConfig);
this.server.registerProcessor(CommandType.ALERT_SEND_REQUEST, new AlertRequestProcessor(alertDao, alertPluginManager, pluginDao));
this.server.registerProcessor(CommandType.ALERT_SEND_REQUEST, new AlertRequestProcessor(alertDao, alertPluginManager));
this.server.start();
}

@ -133,7 +130,7 @@ public class AlertServer {
logger.warn("No Alert Plugin . Can not send alert info. ");
} else {
List<Alert> alerts = alertDao.listWaitExecutionAlert();
alertSender = new AlertSender(alerts, alertDao, alertPluginManager, pluginDao);
alertSender = new AlertSender(alerts, alertDao, alertPluginManager);
alertSender.run();
}
}
@ -1,55 +0,0 @@
///*
// * Licensed to the Apache Software Foundation (ASF) under one or more
// * contributor license agreements. See the NOTICE file distributed with
// * this work for additional information regarding copyright ownership.
// * The ASF licenses this file to You under the Apache License, Version 2.0
// * (the "License"); you may not use this file except in compliance with
// * the License. You may obtain a copy of the License at
// *
// * http://www.apache.org/licenses/LICENSE-2.0
// *
// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS,
// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// * See the License for the specific language governing permissions and
// * limitations under the License.
// */
//package org.apache.dolphinscheduler.alert.manager;
//
//import org.apache.dolphinscheduler.alert.utils.MailUtils;
//
//import java.util.List;
//import java.util.Map;
//
///**
// * email send manager
// */
//public class EmailManager {
// /**
// * email send
// * @param receiversList the receiver list
// * @param receiversCcList the cc List
// * @param title the title
// * @param content the content
// * @param showType the showType
// * @return the send result
// */
// public Map<String,Object> send(List<String> receiversList,List<String> receiversCcList,String title,String content,String showType){
//
// return MailUtils.sendMails(receiversList, receiversCcList, title, content, showType);
// }
//
// /**
// * msg send
// * @param receiversList the receiver list
// * @param title the title
// * @param content the content
// * @param showType the showType
// * @return the send result
// */
// public Map<String,Object> send(List<String> receiversList,String title,String content,String showType){
//
// return MailUtils.sendMails(receiversList,title, content, showType);
// }
//
//}
@ -31,6 +31,7 @@ import org.apache.dolphinscheduler.spi.classloader.ThreadContextClassLoader;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
import org.apache.dolphinscheduler.spi.params.base.PluginParams;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@ -47,6 +48,11 @@ public class AlertPluginManager extends AbstractDolphinPluginManager {
private final Map<String, AlertChannelFactory> alertChannelFactoryMap = new ConcurrentHashMap<>();
private final Map<String, AlertChannel> alertChannelMap = new ConcurrentHashMap<>();

/**
* k->pluginDefineId v->pluginDefineName
*/
private final Map<Integer, String> pluginDefineMap = new HashMap<>();

public void addAlertChannelFactory(AlertChannelFactory alertChannelFactory) {
requireNonNull(alertChannelFactory, "alertChannelFactory is null");

@ -83,6 +89,10 @@ public class AlertPluginManager extends AbstractDolphinPluginManager {
return alertChannelMap;
}

public String getPluginNameById(int id) {
return pluginDefineMap.get(id);
}

@Override
public void installPlugin(DolphinSchedulerPlugin dolphinSchedulerPlugin) {
for (AlertChannelFactory alertChannelFactory : dolphinSchedulerPlugin.getAlertChannelFactorys()) {
@ -93,7 +103,8 @@ public class AlertPluginManager extends AbstractDolphinPluginManager {
String paramsJson = PluginParamsTransfer.transferParamsToJson(params);

PluginDefine pluginDefine = new PluginDefine(nameEn, PluginType.ALERT.getDesc(), paramsJson);
pluginDao.addOrUpdatePluginDefine(pluginDefine);
int id = pluginDao.addOrUpdatePluginDefine(pluginDefine);
pluginDefineMap.put(id, pluginDefine.getPluginName());
}
}
}
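Editor's note: the AlertPluginManager hunk above starts caching the id returned by pluginDao.addOrUpdatePluginDefine so that getPluginNameById(id) can resolve a plugin name without a PluginDao lookup at send time. A rough sketch of that id-to-name cache in isolation, with plain ints and strings standing in for the real DAO and PluginDefine types (illustrative only):

import java.util.HashMap;
import java.util.Map;

public class PluginNameCacheSketch {

    // mirrors pluginDefineMap: key = plugin define id, value = plugin define name
    private final Map<Integer, String> pluginDefineMap = new HashMap<>();

    // called once per installed plugin, with the id the DAO returned
    public void register(int pluginDefineId, String pluginName) {
        pluginDefineMap.put(pluginDefineId, pluginName);
    }

    // returns null when the id was never registered (e.g. the plugin jar was removed)
    public String getPluginNameById(int pluginDefineId) {
        return pluginDefineMap.get(pluginDefineId);
    }

    public static void main(String[] args) {
        PluginNameCacheSketch cache = new PluginNameCacheSketch();
        cache.register(1, "alert-plugin-mail");
        System.out.println(cache.getPluginNameById(1)); // alert-plugin-mail
    }
}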
@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.alert.plugin.AlertPluginManager;
import org.apache.dolphinscheduler.alert.runner.AlertSender;
import org.apache.dolphinscheduler.common.utils.Preconditions;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.PluginDao;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.alert.AlertSendRequestCommand;
@ -35,18 +34,16 @@ import org.slf4j.LoggerFactory;
import io.netty.channel.Channel;

/**
* alert request processor
* alert request processor
*/
public class AlertRequestProcessor implements NettyRequestProcessor {

private final Logger logger = LoggerFactory.getLogger(AlertRequestProcessor.class);
private AlertDao alertDao;
private PluginDao pluginDao;
private AlertPluginManager alertPluginManager;

public AlertRequestProcessor(AlertDao alertDao, AlertPluginManager alertPluginManager, PluginDao pluginDao) {
public AlertRequestProcessor(AlertDao alertDao, AlertPluginManager alertPluginManager) {
this.alertDao = alertDao;
this.pluginDao = pluginDao;
this.alertPluginManager = alertPluginManager;
}

@ -59,7 +56,7 @@ public class AlertRequestProcessor implements NettyRequestProcessor {
command.getBody(), AlertSendRequestCommand.class);
logger.info("received command : {}", alertSendRequestCommand);

AlertSender alertSender = new AlertSender(alertDao, alertPluginManager, pluginDao);
AlertSender alertSender = new AlertSender(alertDao, alertPluginManager);
AlertSendResponseCommand alertSendResponseCommand = alertSender.syncHandler(alertSendRequestCommand.getGroupId(), alertSendRequestCommand.getTitle(), alertSendRequestCommand.getContent());
channel.writeAndFlush(alertSendResponseCommand.convert2Command(command.getOpaque()));

@ -20,8 +20,8 @@ package org.apache.dolphinscheduler.alert.runner;
import org.apache.dolphinscheduler.alert.plugin.AlertPluginManager;
import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.PluginDao;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance;
import org.apache.dolphinscheduler.remote.command.alert.AlertSendResponseCommand;
@ -33,6 +33,7 @@ import org.apache.dolphinscheduler.spi.alert.AlertResult;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -46,25 +47,22 @@ public class AlertSender {

private List<Alert> alertList;
private AlertDao alertDao;
private PluginDao pluginDao;
private AlertPluginManager alertPluginManager;

public AlertSender(AlertPluginManager alertPluginManager) {
this.alertPluginManager = alertPluginManager;
}

public AlertSender(AlertDao alertDao, AlertPluginManager alertPluginManager, PluginDao pluginDao) {
public AlertSender(AlertDao alertDao, AlertPluginManager alertPluginManager) {
super();
this.alertDao = alertDao;
this.pluginDao = pluginDao;
this.alertPluginManager = alertPluginManager;
}

public AlertSender(List<Alert> alertList, AlertDao alertDao, AlertPluginManager alertPluginManager, PluginDao pluginDao) {
public AlertSender(List<Alert> alertList, AlertDao alertDao, AlertPluginManager alertPluginManager) {
super();
this.alertList = alertList;
this.alertDao = alertDao;
this.pluginDao = pluginDao;
this.alertPluginManager = alertPluginManager;
}

@ -75,13 +73,14 @@ public class AlertSender {
List<AlertPluginInstance> alertInstanceList = alertDao.listInstanceByAlertGroupId(alertGroupId);
if (CollectionUtils.isEmpty(alertInstanceList)) {
logger.error("send alert msg fail,no bind plugin instance.");
return;
alertDao.updateAlert(AlertStatus.EXECUTION_FAILURE, "no bind plugin instance", alert.getId());
continue;
}
AlertData alertData = new AlertData();
alertData.setId(alert.getId())
.setContent(alert.getContent())
.setLog(alert.getLog())
.setTitle(alert.getTitle());
.setContent(alert.getContent())
.setLog(alert.getLog())
.setTitle(alert.getTitle());

for (AlertPluginInstance instance : alertInstanceList) {

@ -106,8 +105,8 @@ public class AlertSender {

List<AlertPluginInstance> alertInstanceList = alertDao.listInstanceByAlertGroupId(alertGroupId);
AlertData alertData = new AlertData();
alertData.setContent(title)
.setTitle(content);
alertData.setContent(content)
.setTitle(title);

boolean sendResponseStatus = true;
List<AlertSendResponseResult> sendResponseResults = new ArrayList<>();
@ -126,7 +125,7 @@ public class AlertSender {
for (AlertPluginInstance instance : alertInstanceList) {
AlertResult alertResult = this.alertResultHandler(instance, alertData);
AlertSendResponseResult alertSendResponseResult = new AlertSendResponseResult(
Boolean.parseBoolean(String.valueOf(alertResult.getStatus())), alertResult.getMessage());
Boolean.parseBoolean(String.valueOf(alertResult.getStatus())), alertResult.getMessage());
sendResponseStatus = sendResponseStatus && alertSendResponseResult.getStatus();
sendResponseResults.add(alertSendResponseResult);
}
@ -142,7 +141,7 @@ public class AlertSender {
* @return AlertResult
*/
private AlertResult alertResultHandler(AlertPluginInstance instance, AlertData alertData) {
String pluginName = pluginDao.getPluginDefineById(instance.getPluginDefineId()).getPluginName();
String pluginName = alertPluginManager.getPluginNameById(instance.getPluginDefineId());
AlertChannel alertChannel = alertPluginManager.getAlertChannelMap().get(pluginName);
AlertResult alertResultExtend = new AlertResult();
String pluginInstanceName = instance.getInstanceName();
@ -156,8 +155,16 @@ public class AlertSender {

AlertInfo alertInfo = new AlertInfo();
alertInfo.setAlertData(alertData);
alertInfo.setAlertParams(instance.getPluginInstanceParams());
AlertResult alertResult = alertChannel.process(alertInfo);
Map<String, String> paramsMap = JSONUtils.toMap(instance.getPluginInstanceParams());
alertInfo.setAlertParams(paramsMap);
AlertResult alertResult;
try {
alertResult = alertChannel.process(alertInfo);
} catch (Exception e) {
alertResult = new AlertResult("false", e.getMessage());
logger.error("send alert error alert data id :{},", alertData.getId(), e);
}


if (alertResult == null) {
String message = String.format("Alert Plugin %s send error : return alertResult value is null", pluginInstanceName);
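Editor's note: the alertResultHandler hunk above also wraps the plugin call in a try/catch so that one misbehaving channel becomes an AlertResult("false", message) instead of breaking the sender loop. A stripped-down sketch of that defensive pattern, with the SPI interfaces reduced to placeholders (illustrative only, not the class's actual code):

public class SafePluginCallSketch {

    // stand-in for the AlertChannel SPI: a plugin may throw anything at runtime
    interface Channel {
        String process(String alertContent) throws Exception;
    }

    // never propagates the plugin's exception; always returns a status/message pair
    static String[] callSafely(Channel channel, String alertContent) {
        try {
            return new String[]{"true", channel.process(alertContent)};
        } catch (Exception e) {
            // mirrors alertResult = new AlertResult("false", e.getMessage())
            return new String[]{"false", e.getMessage()};
        }
    }

    public static void main(String[] args) {
        Channel broken = content -> {
            throw new IllegalStateException("webhook unreachable");
        };
        String[] result = callSafely(broken, "test alert");
        System.out.println(result[0] + ": " + result[1]); // false: webhook unreachable
    }
}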
@ -21,7 +21,7 @@
#eg : Alert Server Listener port

#alert.plugin.dir config the Alert Plugin dir . AlertServer while find and load the Alert Plugin Jar from this dir when deploy and start AlertServer on the server .
#eg :alert.plugin.dir=/opt/soft/spi/lib/plugin/alert
alert.plugin.dir=./lib/plugin/alert

#maven.local.repository=/Users/gaojun/Documents/jianguoyun/localRepository

@ -34,12 +34,13 @@ import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

@RunWith(PowerMockRunner.class)
@PrepareForTest({AlertServer.class,DaoFactory.class})
@PrepareForTest({AlertServer.class, DaoFactory.class})
public class AlertServerTest {

@Before
@ -61,7 +62,8 @@ public class AlertServerTest {
AlertPluginManager alertPluginManager = PowerMockito.mock(AlertPluginManager.class);
PowerMockito.whenNew(AlertPluginManager.class).withNoArguments().thenReturn(alertPluginManager);
ConcurrentHashMap alertChannelMap = new ConcurrentHashMap<>();
alertChannelMap.put("pluginName",alertChannelMock);
alertChannelMap.put("pluginName", alertChannelMock);
PowerMockito.when(alertPluginManager.getPluginNameById(Mockito.anyInt())).thenReturn("pluginName");
PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap);

DolphinPluginManagerConfig alertPluginManagerConfig = PowerMockito.mock(DolphinPluginManagerConfig.class);
@ -79,7 +81,8 @@ public class AlertServerTest {
Assert.assertNotNull(alertServer);

new Thread(() -> {
alertServer.start(); })
alertServer.start();
})
.start();

Thread.sleep(5 * Constants.ALERT_SCAN_INTERVAL);
@ -136,7 +136,7 @@ public class EmailAlertPluginTest {
alertPluginInstance.setPluginInstanceParams(getEmailAlertParams());
alertDao.getAlertPluginInstanceMapper().insert(alertPluginInstance);

AlertSender alertSender = new AlertSender(alertList, alertDao, alertPluginManager, pluginDao);
AlertSender alertSender = new AlertSender(alertList, alertDao, alertPluginManager);
alertSender.run();

Alert alertResult = alertDao.getAlertMapper().selectById(alert1.getId());
@ -19,7 +19,6 @@ package org.apache.dolphinscheduler.alert.processor;

import org.apache.dolphinscheduler.alert.plugin.AlertPluginManager;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.PluginDao;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.command.alert.AlertSendRequestCommand;
@ -37,7 +36,6 @@ import io.netty.channel.Channel;
public class AlertRequestProcessorTest {

private AlertDao alertDao;
private PluginDao pluginDao;
private AlertPluginManager alertPluginManager;

private AlertRequestProcessor alertRequestProcessor;
@ -45,17 +43,16 @@ public class AlertRequestProcessorTest {
@Before
public void before() {
alertDao = PowerMockito.mock(AlertDao.class);
pluginDao = PowerMockito.mock(PluginDao.class);
alertPluginManager = PowerMockito.mock(AlertPluginManager.class);
alertRequestProcessor = new AlertRequestProcessor(alertDao,alertPluginManager,pluginDao);
alertRequestProcessor = new AlertRequestProcessor(alertDao, alertPluginManager);
}

@Test
public void testProcess() {
Channel channel = PowerMockito.mock(Channel.class);
AlertSendRequestCommand alertSendRequestCommand = new AlertSendRequestCommand(1,"title","content");
AlertSendRequestCommand alertSendRequestCommand = new AlertSendRequestCommand(1, "title", "content");
Command reqCommand = alertSendRequestCommand.convert2Command();
Assert.assertEquals(CommandType.ALERT_SEND_REQUEST,reqCommand.getType());
alertRequestProcessor.process(channel,reqCommand);
Assert.assertEquals(CommandType.ALERT_SEND_REQUEST, reqCommand.getType());
alertRequestProcessor.process(channel, reqCommand);
}
}
@ -67,7 +67,7 @@ public class AlertSenderTest {
int alertGroupId = 1;
String title = "alert mail test title";
String content = "alert mail test content";
alertSender = new AlertSender(alertDao,alertPluginManager,pluginDao);
alertSender = new AlertSender(alertDao, alertPluginManager);

//1.alert instance does not exist
PowerMockito.when(alertDao.listInstanceByAlertGroupId(alertGroupId)).thenReturn(null);
@ -75,7 +75,7 @@ public class AlertSenderTest {
AlertSendResponseCommand alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content);
Assert.assertFalse(alertSendResponseCommand.getResStatus());
alertSendResponseCommand.getResResults().forEach(result ->
logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage()));
logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage()));

//2.alert plugin does not exist
int pluginDefineId = 1;
@ -83,30 +83,31 @@ public class AlertSenderTest {
String pluginInstanceName = "alert-instance-mail";
List<AlertPluginInstance> alertInstanceList = new ArrayList<>();
AlertPluginInstance alertPluginInstance = new AlertPluginInstance(
pluginDefineId,pluginInstanceParams,pluginInstanceName);
pluginDefineId, pluginInstanceParams, pluginInstanceName);
alertInstanceList.add(alertPluginInstance);
PowerMockito.when(alertDao.listInstanceByAlertGroupId(1)).thenReturn(alertInstanceList);

String pluginName = "alert-plugin-mail";
PluginDefine pluginDefine = new PluginDefine(pluginName,"1",null);
PluginDefine pluginDefine = new PluginDefine(pluginName, "1", null);
PowerMockito.when(pluginDao.getPluginDefineById(pluginDefineId)).thenReturn(pluginDefine);

alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content);
Assert.assertFalse(alertSendResponseCommand.getResStatus());
alertSendResponseCommand.getResResults().forEach(result ->
logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage()));
logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage()));

//3.alert result value is null
AlertChannel alertChannelMock = PowerMockito.mock(AlertChannel.class);
PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(null);
Map<String, AlertChannel> alertChannelMap = new ConcurrentHashMap<>();
alertChannelMap.put(pluginName,alertChannelMock);
alertChannelMap.put(pluginName, alertChannelMock);
PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap);
PowerMockito.when(alertPluginManager.getPluginNameById(Mockito.anyInt())).thenReturn("alert-plugin-mail");

alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content);
Assert.assertFalse(alertSendResponseCommand.getResStatus());
alertSendResponseCommand.getResResults().forEach(result ->
logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage()));
logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage()));

//4.abnormal information inside the alert plug-in code
AlertResult alertResult = new AlertResult();
@ -114,27 +115,27 @@ public class AlertSenderTest {
alertResult.setMessage("Abnormal information inside the alert plug-in code");
PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(alertResult);
alertChannelMap = new ConcurrentHashMap<>();
alertChannelMap.put(pluginName,alertChannelMock);
alertChannelMap.put(pluginName, alertChannelMock);
PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap);

alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content);
Assert.assertFalse(alertSendResponseCommand.getResStatus());
alertSendResponseCommand.getResResults().forEach(result ->
logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage()));
logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage()));

//5.alert plugin send success
alertResult = new AlertResult();
alertResult.setStatus(String.valueOf(true));
alertResult.setMessage(String.format("Alert Plugin %s send success",pluginInstanceName));
alertResult.setMessage(String.format("Alert Plugin %s send success", pluginInstanceName));
PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(alertResult);
alertChannelMap = new ConcurrentHashMap<>();
alertChannelMap.put(pluginName,alertChannelMock);
alertChannelMap.put(pluginName, alertChannelMock);
PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap);

alertSendResponseCommand = alertSender.syncHandler(alertGroupId, title, content);
Assert.assertTrue(alertSendResponseCommand.getResStatus());
alertSendResponseCommand.getResResults().forEach(result ->
logger.info("alert send response result, status:{}, message:{}",result.getStatus(),result.getMessage()));
logger.info("alert send response result, status:{}, message:{}", result.getStatus(), result.getMessage()));

}

@ -150,28 +151,29 @@ public class AlertSenderTest {
alert.setContent(content);
alertList.add(alert);

alertSender = new AlertSender(alertList,alertDao,alertPluginManager,pluginDao);
alertSender = new AlertSender(alertList, alertDao, alertPluginManager);

int pluginDefineId = 1;
String pluginInstanceParams = "alert-instance-mail-params";
String pluginInstanceName = "alert-instance-mail";
List<AlertPluginInstance> alertInstanceList = new ArrayList<>();
AlertPluginInstance alertPluginInstance = new AlertPluginInstance(
pluginDefineId,pluginInstanceParams,pluginInstanceName);
pluginDefineId, pluginInstanceParams, pluginInstanceName);
alertInstanceList.add(alertPluginInstance);
PowerMockito.when(alertDao.listInstanceByAlertGroupId(alertGroupId)).thenReturn(alertInstanceList);

String pluginName = "alert-plugin-mail";
PluginDefine pluginDefine = new PluginDefine(pluginName,"1",null);
PluginDefine pluginDefine = new PluginDefine(pluginName, "1", null);
PowerMockito.when(pluginDao.getPluginDefineById(pluginDefineId)).thenReturn(pluginDefine);
PowerMockito.when(alertPluginManager.getPluginNameById(1)).thenReturn("alert-instance-mail");

AlertResult alertResult = new AlertResult();
alertResult.setStatus(String.valueOf(true));
alertResult.setMessage(String.format("Alert Plugin %s send success",pluginInstanceName));
alertResult.setMessage(String.format("Alert Plugin %s send success", pluginInstanceName));
AlertChannel alertChannelMock = PowerMockito.mock(AlertChannel.class);
PowerMockito.when(alertChannelMock.process(Mockito.any())).thenReturn(alertResult);
ConcurrentHashMap alertChannelMap = new ConcurrentHashMap<>();
alertChannelMap.put(pluginName,alertChannelMock);
alertChannelMap.put(pluginName, alertChannelMock);
PowerMockito.when(alertPluginManager.getAlertChannelMap()).thenReturn(alertChannelMap);
Assert.assertTrue(Boolean.parseBoolean(alertResult.getStatus()));
alertSender.run();
@ -96,10 +96,14 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"),
@ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"),
@ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String")
})
@PostMapping(value = "/create")
@ResponseStatus(HttpStatus.CREATED)
@ -115,10 +119,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
@RequestParam(value = "other") String other,
@RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf,
@RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername,
@RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) {
logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {}, port: {}, database : {}, principal: {}, userName : {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, host, port, database, principal, userName, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other,
javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath);
return dataSourceService.createDataSource(loginUser, name, note, type, parameter);
}

@ -149,10 +157,14 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"),
@ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"),
@ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String")
})
@PostMapping(value = "/update")
@ResponseStatus(HttpStatus.OK)
@ -169,10 +181,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
@RequestParam(value = "other") String other,
@RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf,
@RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername,
@RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) {
logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other,
javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath);
return dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
}

@ -274,10 +290,14 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "host", value = "DATA_SOURCE_HOST", required = true, dataType = "String"),
@ApiImplicitParam(name = "port", value = "DATA_SOURCE_PORT", required = true, dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "principal", value = "DATA_SOURCE_PRINCIPAL", dataType = "String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType = "String"),
@ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String")
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType = "String"),
@ApiImplicitParam(name = "javaSecurityKrb5Conf", value = "DATA_SOURCE_KERBEROS_KRB5_CONF", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabUsername", value = "DATA_SOURCE_KERBEROS_KEYTAB_USERNAME", dataType = "String"),
@ApiImplicitParam(name = "loginUserKeytabPath", value = "DATA_SOURCE_KERBEROS_KEYTAB_PATH", dataType = "String")
})
@PostMapping(value = "/connect")
@ResponseStatus(HttpStatus.OK)
@ -293,10 +313,14 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
@RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
@RequestParam(value = "other") String other,
@RequestParam(value = "javaSecurityKrb5Conf", required = false) String javaSecurityKrb5Conf,
@RequestParam(value = "loginUserKeytabUsername", required = false) String loginUserKeytabUsername,
@RequestParam(value = "loginUserKeytabPath", required = false) String loginUserKeytabPath) {
logger.info("login user {}, connect datasource: {}, note: {}, type: {}, connectType: {}, other: {}",
loginUser.getUserName(), name, note, type, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other);
String parameter = dataSourceService.buildParameter(type, host, port, database, principal, userName, password, connectType, other,
javaSecurityKrb5Conf, loginUserKeytabUsername, loginUserKeytabPath);
return dataSourceService.checkConnection(type, parameter);
}

@ -133,7 +133,7 @@ public enum Status {
QUERY_TASK_INSTANCE_LOG_ERROR(10103, "view task instance log error", "查询任务实例日志错误"),
DOWNLOAD_TASK_INSTANCE_LOG_FILE_ERROR(10104, "download task instance log file error", "下载任务日志文件错误"),
CREATE_PROCESS_DEFINITION(10105, "create process definition", "创建工作流错误"),
VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106, "verify process definition name unique error", "工作流定义名称已存在"),
VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106, "verify process definition name unique error", "工作流定义名称验证错误"),
UPDATE_PROCESS_DEFINITION_ERROR(10107, "update process definition error", "更新工作流定义错误"),
RELEASE_PROCESS_DEFINITION_ERROR(10108, "release process definition error", "上线工作流错误"),
QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR(10109, "query datail of process definition error", "查询工作流详细信息错误"),
@ -199,6 +199,7 @@ public enum Status {
FORCE_TASK_SUCCESS_ERROR(10165, "force task success error", "强制成功任务实例错误"),
TASK_INSTANCE_STATE_OPERATION_ERROR(10166, "the status of task instance {0} is {1},Cannot perform force success operation", "任务实例[{0}]的状态是[{1}],无法执行强制成功操作"),
DATASOURCE_TYPE_NOT_EXIST(10167, "data source type not exist", "数据源类型不存在"),
PROCESS_DEFINITION_NAME_EXIST(10168, "process definition name {0} already exists", "工作流定义名称[{0}]已存在"),

UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"),
UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"),
@ -290,6 +291,7 @@ public enum Status {
QUERY_ALL_ALERT_PLUGIN_INSTANCE_ERROR(110009, "query all alert plugin instance error", "查询所有告警实例失败"),
PLUGIN_INSTANCE_ALREADY_EXIT(110010,"plugin instance already exit","该告警插件实例已存在"),
LIST_PAGING_ALERT_PLUGIN_INSTANCE_ERROR(110011,"query plugin instance page error","分页查询告警实例失败"),
DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED(110012,"failed to delete the alert instance, there is an alarm group associated with this alert instance","删除告警实例失败,存在与此告警实例关联的警报组")

;

@ -17,7 +17,6 @@

package org.apache.dolphinscheduler.api.interceptor;

import org.apache.dolphinscheduler.api.service.BaseService;
import org.apache.dolphinscheduler.common.Constants;

import java.util.Locale;
@ -30,12 +29,13 @@ import org.springframework.context.i18n.LocaleContextHolder;
import org.springframework.lang.Nullable;
import org.springframework.util.StringUtils;
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter;
import org.springframework.web.util.WebUtils;

public class LocaleChangeInterceptor extends HandlerInterceptorAdapter {

@Override
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
Cookie cookie = BaseService.getCookie(request, Constants.LOCALE_LANGUAGE);
Cookie cookie = WebUtils.getCookie(request, Constants.LOCALE_LANGUAGE);
if (cookie != null) {
// Proceed in cookie
return true;
@ -113,27 +113,6 @@ public class BaseService {
return false;
}


/**
* get cookie info by name
*
* @param request request
* @param name 'sessionId'
* @return get cookie info
*/
public static Cookie getCookie(HttpServletRequest request, String name) {
Cookie[] cookies = request.getCookies();
if (cookies != null && cookies.length > 0) {
for (Cookie cookie : cookies) {
if (StringUtils.isNotEmpty(name) && name.equalsIgnoreCase(cookie.getName())) {
return cookie;
}
}
}

return null;
}

/**
* create tenant dir if not exists
*
@ -267,6 +267,9 @@ public class DataSourceService extends BaseService {
map.put(HOST, host);
map.put(PORT, port);
map.put(PRINCIPAL, datasourceForm.getPrincipal());
map.put(Constants.KERBEROS_KRB5_CONF_PATH, datasourceForm.getJavaSecurityKrb5Conf());
map.put(Constants.KERBEROS_KEY_TAB_USERNAME, datasourceForm.getLoginUserKeytabUsername());
map.put(Constants.KERBEROS_KEY_TAB_PATH, datasourceForm.getLoginUserKeytabPath());
map.put(DATABASE, database);
map.put(USER_NAME, datasourceForm.getUser());
map.put(OTHER, otherMap);
@ -424,7 +427,8 @@ public class DataSourceService extends BaseService {
*/
public String buildParameter(DbType type, String host,
String port, String database, String principal, String userName,
String password, DbConnectType connectType, String other) {
String password, DbConnectType connectType, String other,
String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) {

String address = buildAddress(type, host, port, connectType);
Map<String, Object> parameterMap = new LinkedHashMap<String, Object>(6);
@ -467,6 +471,9 @@ public class DataSourceService extends BaseService {
if (CommonUtils.getKerberosStartupState()
&& (type == DbType.HIVE || type == DbType.SPARK)) {
parameterMap.put(Constants.PRINCIPAL, principal);
parameterMap.put(Constants.KERBEROS_KRB5_CONF_PATH, javaSecurityKrb5Conf);
parameterMap.put(Constants.KERBEROS_KEY_TAB_USERNAME, loginUserKeytabUsername);
parameterMap.put(Constants.KERBEROS_KEY_TAB_PATH, loginUserKeytabPath);
}

Map<String, String> map = JSONUtils.toMap(other);
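Editor's note: the DataSourceService hunk above only adds the three Kerberos entries to the connection parameter map when Kerberos startup is enabled and the source type is HIVE or SPARK. A small sketch of that conditional assembly, with plain strings standing in for the Constants keys and enum checks (names are illustrative, not the service's actual fields):

import java.util.LinkedHashMap;
import java.util.Map;

public class KerberosParamSketch {

    static Map<String, Object> buildParameterMap(boolean kerberosEnabled, String dbType,
                                                 String principal, String krb5Conf,
                                                 String keytabUser, String keytabPath) {
        Map<String, Object> parameterMap = new LinkedHashMap<>();
        // Kerberos fields are only meaningful for HIVE/SPARK sources with Kerberos switched on
        if (kerberosEnabled && ("HIVE".equals(dbType) || "SPARK".equals(dbType))) {
            parameterMap.put("principal", principal);
            parameterMap.put("javaSecurityKrb5Conf", krb5Conf);
            parameterMap.put("loginUserKeytabUsername", keytabUser);
            parameterMap.put("loginUserKeytabPath", keytabPath);
        }
        return parameterMap;
    }

    public static void main(String[] args) {
        System.out.println(buildParameterMap(true, "HIVE",
                "hive/_HOST@EXAMPLE.COM", "/etc/krb5.conf", "hive@EXAMPLE.COM", "/etc/security/hive.keytab"));
        System.out.println(buildParameterMap(false, "MYSQL", null, null, null, null)); // stays empty
    }
}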
@ -256,7 +256,7 @@ public class ProcessInstanceService extends BaseService {
List<ProcessInstance> processInstances = processInstanceList.getRecords();

for (ProcessInstance processInstance : processInstances) {
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime()));
processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()));
User executor = usersService.queryUser(processInstance.getExecutorId());
if (null != executor) {
processInstance.setExecutorName(executor.getUserName());
@ -428,10 +428,9 @@ public class ProcessInstanceService extends BaseService {
return result;
}
Date schedule = null;
schedule = processInstance.getScheduleTime();
if (scheduleTime != null) {
schedule = DateUtils.getScheduleDate(scheduleTime);
} else {
schedule = processInstance.getScheduleTime();
}
processInstance.setScheduleTime(schedule);
processInstance.setLocations(locations);
@ -460,13 +459,18 @@ public class ProcessInstanceService extends BaseService {
if (tenant != null) {
processInstance.setTenantCode(tenant.getTenantCode());
}
// get the processinstancejson before saving,and then save the name and taskid
String oldJson = processInstance.getProcessInstanceJson();
if (StringUtils.isNotEmpty(oldJson)) {
processInstanceJson = processService.changeJson(processData,oldJson);
}
processInstance.setProcessInstanceJson(processInstanceJson);
processInstance.setGlobalParams(globalParams);
}

int update = processService.updateProcessInstance(processInstance);
int updateDefine = 1;
if (Boolean.TRUE.equals(syncDefine) && StringUtils.isNotEmpty(processInstanceJson)) {
if (Boolean.TRUE.equals(syncDefine)) {
processDefinition.setProcessDefinitionJson(processInstanceJson);
processDefinition.setGlobalParams(originDefParams);
processDefinition.setLocations(locations);
@ -131,7 +131,7 @@ public class TaskInstanceService extends BaseService {
List<TaskInstance> taskInstanceList = taskInstanceIPage.getRecords();

for (TaskInstance taskInstance : taskInstanceList) {
taskInstance.setDuration(DateUtils.differSec(taskInstance.getStartTime(), taskInstance.getEndTime()));
taskInstance.setDuration(DateUtils.format2Duration(taskInstance.getStartTime(), taskInstance.getEndTime()));
User executor = usersService.queryUser(taskInstance.getExecutorId());
if (null != executor) {
taskInstance.setExecutorName(executor.getUserName());
@ -153,19 +153,21 @@ public class WorkerGroupService extends BaseService {
}
}

// available workerGroup list
List<String> availableWorkerGroupList = new ArrayList<>();

for (String workerGroup : workerGroupList) {
String workerGroupPath = workerPath + "/" + workerGroup;
List<String> childrenNodes = zookeeperCachedOperator.getChildrenKeys(workerGroupPath);
String timeStamp = "";
for (int i = 0; i < childrenNodes.size(); i++) {
String ip = childrenNodes.get(i);
childrenNodes.set(i, ip.substring(0, ip.lastIndexOf(":")));
timeStamp = ip.substring(ip.lastIndexOf(":"));
}
if (CollectionUtils.isNotEmpty(childrenNodes)) {
availableWorkerGroupList.add(workerGroup);
WorkerGroup wg = new WorkerGroup();
wg.setName(workerGroup);
if (isPaging) {
wg.setIpList(childrenNodes);
String registeredIpValue = zookeeperCachedOperator.get(workerGroupPath + "/" + childrenNodes.get(0));
String registeredIpValue = zookeeperCachedOperator.get(workerGroupPath + "/" + childrenNodes.get(0) + timeStamp);
wg.setCreateTime(DateUtils.stringToDate(registeredIpValue.split(",")[6]));
wg.setUpdateTime(DateUtils.stringToDate(registeredIpValue.split(",")[7]));
}
@ -24,16 +24,25 @@ import org.apache.dolphinscheduler.api.utils.PageInfo;
|
||||
import org.apache.dolphinscheduler.api.vo.AlertPluginInstanceVO;
|
||||
import org.apache.dolphinscheduler.common.Constants;
|
||||
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
|
||||
import org.apache.dolphinscheduler.common.utils.JSONUtils;
|
||||
import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance;
|
||||
import org.apache.dolphinscheduler.dao.entity.PluginDefine;
|
||||
import org.apache.dolphinscheduler.dao.entity.User;
|
||||
import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper;
|
||||
import org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper;
|
||||
import org.apache.dolphinscheduler.dao.mapper.PluginDefineMapper;
|
||||
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;
|
||||
import org.apache.dolphinscheduler.spi.params.base.PluginParams;
|
||||
|
||||
import org.apache.commons.collections4.MapUtils;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
@ -56,6 +65,9 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert
|
||||
@Autowired
|
||||
private PluginDefineMapper pluginDefineMapper;
|
||||
|
||||
@Autowired
|
||||
private AlertGroupMapper alertGroupMapper;
|
||||
|
||||
/**
|
||||
* creat alert plugin instance
|
||||
*
|
||||
@ -67,7 +79,8 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert
|
||||
@Override
|
||||
public Map<String, Object> create(User loginUser, int pluginDefineId, String instanceName, String pluginInstanceParams) {
|
||||
AlertPluginInstance alertPluginInstance = new AlertPluginInstance();
|
||||
alertPluginInstance.setPluginInstanceParams(pluginInstanceParams);
|
||||
String paramsMapJson = parsePluginParamsMap(pluginInstanceParams);
|
||||
alertPluginInstance.setPluginInstanceParams(paramsMapJson);
|
||||
alertPluginInstance.setInstanceName(instanceName);
|
||||
alertPluginInstance.setPluginDefineId(pluginDefineId);
|
||||
|
||||
@ -82,7 +95,9 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert
|
||||
|
||||
if (i > 0) {
|
||||
putMsg(result, Status.SUCCESS);
|
||||
return result;
|
||||
}
|
||||
putMsg(result, Status.SAVE_ERROR);
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -98,7 +113,8 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert
|
||||
public Map<String, Object> update(User loginUser, int pluginInstanceId, String instanceName, String pluginInstanceParams) {
|
||||
|
||||
AlertPluginInstance alertPluginInstance = new AlertPluginInstance();
|
||||
alertPluginInstance.setPluginInstanceParams(pluginInstanceParams);
|
||||
String paramsMapJson = parsePluginParamsMap(pluginInstanceParams);
|
||||
alertPluginInstance.setPluginInstanceParams(paramsMapJson);
|
||||
alertPluginInstance.setInstanceName(instanceName);
|
||||
alertPluginInstance.setId(pluginInstanceId);
|
||||
Map<String, Object> result = new HashMap<>();
|
||||
@ -106,8 +122,9 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert
|
||||
|
||||
if (i > 0) {
|
||||
putMsg(result, Status.SUCCESS);
|
||||
return result;
|
||||
}
|
||||
|
||||
putMsg(result, Status.SAVE_ERROR);
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -121,6 +138,13 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert
|
||||
@Override
|
||||
public Map<String, Object> delete(User loginUser, int id) {
|
||||
Map<String, Object> result = new HashMap<>();
|
||||
//check if there is an associated alert group
|
||||
boolean hasAssociatedAlertGroup = checkHasAssociatedAlertGroup(String.valueOf(id));
|
||||
if (hasAssociatedAlertGroup) {
|
||||
putMsg(result, Status.DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED);
|
||||
return result;
|
||||
}
|
||||
|
||||
int i = alertPluginInstanceMapper.deleteById(id);
|
||||
if (i > 0) {
|
||||
putMsg(result, Status.SUCCESS);
@ -188,21 +212,73 @@ public class AlertPluginInstanceServiceImpl extends BaseService implements Alert
        if (CollectionUtils.isEmpty(pluginDefineList)) {
            return null;
        }
        Map<Integer, String> pluginDefineMap = pluginDefineList.stream().collect(Collectors.toMap(PluginDefine::getId, PluginDefine::getPluginName));
        Map<Integer, PluginDefine> pluginDefineMap = pluginDefineList.stream().collect(Collectors.toMap(PluginDefine::getId, Function.identity()));
        List<AlertPluginInstanceVO> alertPluginInstanceVOS = new ArrayList<>();
        alertPluginInstances.forEach(alertPluginInstance -> {
            AlertPluginInstanceVO alertPluginInstanceVO = new AlertPluginInstanceVO();
            alertPluginInstanceVO.setAlertPluginName(pluginDefineMap.get(alertPluginInstance.getPluginDefineId()));

            alertPluginInstanceVO.setCreateTime(alertPluginInstance.getCreateTime());
            alertPluginInstanceVO.setUpdateTime(alertPluginInstance.getUpdateTime());
            alertPluginInstanceVO.setPluginDefineId(alertPluginInstance.getPluginDefineId());
            alertPluginInstanceVO.setInstanceName(alertPluginInstance.getInstanceName());
            alertPluginInstanceVO.setId(alertPluginInstance.getId());
            PluginDefine pluginDefine = pluginDefineMap.get(alertPluginInstance.getPluginDefineId());
            // FIXME: this can happen when the user removes the plug-in; perhaps a new field should mark the plug-in as expired?
            if (null == pluginDefine) {
                return;
            }
            alertPluginInstanceVO.setAlertPluginName(pluginDefine.getPluginName());
            // TODO: list pages should not return this parameter
            alertPluginInstanceVO.setPluginInstanceParams(alertPluginInstance.getPluginInstanceParams());
            String pluginParamsMapString = alertPluginInstance.getPluginInstanceParams();
            String uiPluginParams = parseToPluginUiParams(pluginParamsMapString, pluginDefine.getPluginParams());
            alertPluginInstanceVO.setPluginInstanceParams(uiPluginParams);
            alertPluginInstanceVOS.add(alertPluginInstanceVO);
        });
        return alertPluginInstanceVOS;

    }

    /**
     * Get the parameters actually needed by the plugin
     *
     * @param pluginParams complete parameters (including UI definitions)
     * @return the key/value pairs as a JSON string
     */
    private String parsePluginParamsMap(String pluginParams) {
        Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(pluginParams);
        return JSONUtils.toJsonString(paramsMap);
    }

    /**
     * Merge the stored key/value data back into the plugin's UI parameter definitions
     *
     * @param pluginParamsMapString key/value data stored for the instance
     * @param pluginUiParams complete parameter definitions (including UI)
     * @return complete parameter list (including UI) as a JSON string
     */
    private String parseToPluginUiParams(String pluginParamsMapString, String pluginUiParams) {
        Map<String, String> paramsMap = JSONUtils.toMap(pluginParamsMapString);
        if (MapUtils.isEmpty(paramsMap)) {
            return null;
        }
        List<PluginParams> pluginParamsList = JSONUtils.toList(pluginUiParams, PluginParams.class);
        List<PluginParams> newPluginParamsList = new ArrayList<>(pluginParamsList.size());
        pluginParamsList.forEach(pluginParams -> {
            pluginParams.setValue(paramsMap.get(pluginParams.getName()));
            newPluginParamsList.add(pluginParams);

        });

        return JSONUtils.toJsonString(newPluginParamsList);
    }

    private boolean checkHasAssociatedAlertGroup(String id) {
        List<String> idsList = alertGroupMapper.queryInstanceIdsList();
        if (CollectionUtils.isEmpty(idsList)) {
            return false;
        }
        Optional<String> first = idsList.stream().filter(k -> null != k && Arrays.asList(k.split(",")).contains(id)).findFirst();
        return first.isPresent();
    }

}
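The queryAll() hunk above now returns UI-ready parameters: the instance's stored key/value JSON is merged back into the plugin's UI parameter definitions via parseToPluginUiParams before being placed on the VO. A rough, self-contained sketch of that merge using plain collections follows; the Param class here is a simplified stand-in for the project's PluginParams, and JSON handling is omitted rather than reproducing JSONUtils.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Simplified version of the parseToPluginUiParams merge: take the UI parameter
// definitions shipped by the plugin and fill in each parameter's value from the
// key/value map stored for the alert plugin instance.
public class UiParamsMergeSketch {

    // Stand-in for the project's PluginParams type.
    static class Param {
        final String name;
        String value;
        Param(String name) { this.name = name; }
        @Override public String toString() { return name + "=" + value; }
    }

    static List<Param> merge(List<Param> uiDefinitions, Map<String, String> storedValues) {
        List<Param> merged = new ArrayList<>(uiDefinitions.size());
        for (Param definition : uiDefinitions) {
            definition.value = storedValues.get(definition.name); // null if the user never set it
            merged.add(definition);
        }
        return merged;
    }

    public static void main(String[] args) {
        List<Param> definitions = Arrays.asList(new Param("receivers"), new Param("serverHost"));
        Map<String, String> stored = new HashMap<>();
        stored.put("receivers", "ops@example.com");
        System.out.println(merge(definitions, stored)); // [receivers=ops@example.com, serverHost=null]
    }
}
```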
@ -429,11 +429,13 @@ public class ProcessDefinitionServiceImpl extends BaseService implements
            // check whether the new process definition name already exists
            ProcessDefinition definition = processDefineMapper.verifyByDefineName(project.getId(), name);
            if (definition != null) {
                putMsg(result, Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR, name);
                putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
                return result;
            }
        }

        // get the process definition JSON before saving, and then save the name and task id
        String oldJson = processDefine.getProcessDefinitionJson();
        processDefinitionJson = processService.changeJson(processData, oldJson);
        Date now = new Date();

        processDefine.setId(id);
@ -495,7 +497,7 @@ public class ProcessDefinitionServiceImpl extends BaseService implements
        if (processDefinition == null) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR, name);
            putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
        }
        return result;
    }
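Both ProcessDefinitionServiceImpl hunks above swap the generic VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR for the more specific PROCESS_DEFINITION_NAME_EXIST when a definition with the requested name is found. A minimal sketch of that verification flow is below; the in-memory set stands in for the processDefineMapper.verifyByDefineName lookup, and the names are illustrative.

```java
import java.util.HashSet;
import java.util.Set;

// Sketch of the name-uniqueness check: SUCCESS when the name is free,
// PROCESS_DEFINITION_NAME_EXIST when a definition with that name already exists.
// The in-memory set stands in for the database lookup.
public class VerifyNameSketch {

    enum Status { SUCCESS, PROCESS_DEFINITION_NAME_EXIST }

    private final Set<String> existingNames = new HashSet<>();

    Status verifyName(String name) {
        return existingNames.contains(name) ? Status.PROCESS_DEFINITION_NAME_EXIST : Status.SUCCESS;
    }

    public static void main(String[] args) {
        VerifyNameSketch sketch = new VerifyNameSketch();
        sketch.existingNames.add("daily-etl");
        System.out.println(sketch.verifyName("daily-etl"));   // PROCESS_DEFINITION_NAME_EXIST
        System.out.println(sketch.verifyName("hourly-sync")); // SUCCESS
    }
}
```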
@ -1325,7 +1327,7 @@ public class ProcessDefinitionServiceImpl extends BaseService implements
        List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineId(processId, limit);

        for (ProcessInstance processInstance : processInstanceList) {
            processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime()));
            processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()));
        }

        if (limit > processInstanceList.size()) {
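The hunk above changes the reported instance duration from a raw second count (differSec) to a formatted string (format2Duration). The exact output of the project's DateUtils.format2Duration is not shown in this diff, so the following is only an illustration of the general idea: render the gap between two timestamps as a days/hours/minutes/seconds string.

```java
import java.util.Date;
import java.util.concurrent.TimeUnit;

// Illustrative only: renders the elapsed time between two Dates as e.g. "1d 1h 1m 1s".
// DolphinScheduler's DateUtils.format2Duration may format differently; this is not the project's code.
public class DurationFormatSketch {

    static String format2Duration(Date start, Date end) {
        long millis = end.getTime() - start.getTime();
        long days = TimeUnit.MILLISECONDS.toDays(millis);
        long hours = TimeUnit.MILLISECONDS.toHours(millis) % 24;
        long minutes = TimeUnit.MILLISECONDS.toMinutes(millis) % 60;
        long seconds = TimeUnit.MILLISECONDS.toSeconds(millis) % 60;
        StringBuilder sb = new StringBuilder();
        if (days > 0) {
            sb.append(days).append("d ");
        }
        if (hours > 0) {
            sb.append(hours).append("h ");
        }
        if (minutes > 0) {
            sb.append(minutes).append("m ");
        }
        sb.append(seconds).append("s");
        return sb.toString();
    }

    public static void main(String[] args) {
        Date start = new Date(0L);
        Date end = new Date(90_061_000L); // 1 day, 1 hour, 1 minute, 1 second later
        System.out.println(format2Duration(start, end)); // 1d 1h 1m 1s
    }
}
```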
@ -38,6 +38,7 @@ import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.util.WebUtils;

/**
 * session service implementation
@ -60,7 +61,7 @@ public class SessionServiceImpl extends BaseService implements SessionService {
        String sessionId = request.getHeader(Constants.SESSION_ID);

        if (StringUtils.isBlank(sessionId)) {
            Cookie cookie = getCookie(request, Constants.SESSION_ID);
            Cookie cookie = WebUtils.getCookie(request, Constants.SESSION_ID);

            if (cookie != null) {
                sessionId = cookie.getValue();
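The change above replaces a hand-rolled getCookie helper with Spring's org.springframework.web.util.WebUtils.getCookie, which returns the first cookie with the given name or null. A small sketch of the header-then-cookie fallback used when resolving the session id follows; the "sessionId" name is an assumed placeholder for the project's Constants.SESSION_ID.

```java
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;

import org.springframework.web.util.WebUtils;

// Sketch of the header-then-cookie lookup for the session id. WebUtils.getCookie(request, name)
// is Spring's utility and returns the first matching Cookie or null. "sessionId" is an assumed
// header/cookie name for illustration; the real constant lives in the project's Constants class.
public class SessionIdResolver {

    private static final String SESSION_ID = "sessionId";

    public static String resolveSessionId(HttpServletRequest request) {
        String sessionId = request.getHeader(SESSION_ID);
        if (sessionId == null || sessionId.trim().isEmpty()) {
            Cookie cookie = WebUtils.getCookie(request, SESSION_ID);
            if (cookie != null) {
                sessionId = cookie.getValue();
            }
        }
        return sessionId;
    }
}
```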
@ -125,6 +125,10 @@ TENANT_CODE=os tenant code
QUEUE_NAME=queue name
PASSWORD=password
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
DATA_SOURCE_PRINCIPAL=principal
DATA_SOURCE_KERBEROS_KRB5_CONF=the kerberos authentication parameter java.security.krb5.conf
DATA_SOURCE_KERBEROS_KEYTAB_USERNAME=the kerberos authentication parameter login.user.keytab.username
DATA_SOURCE_KERBEROS_KEYTAB_PATH=the kerberos authentication parameter login.user.keytab.path
PROJECT_TAG=project related operation
CREATE_PROJECT_NOTES=create project
PROJECT_DESC=project description
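The new keys above document the Kerberos-related datasource parameters exposed by the API (principal, java.security.krb5.conf, login.user.keytab.username, login.user.keytab.path). As a rough illustration of the {"key1":"value1",...} key/value shape that DATA_SOURCE_OTHER describes, the sketch below assembles such values into a JSON object with Jackson; the exact request fields the datasource controller expects are not shown in this diff, so treat the keys and sample values as examples only.

```java
import java.util.LinkedHashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;

// Illustrative only: builds the Kerberos-related settings named above as a key/value map
// and renders them in the {"key1":"value1",...} shape documented for jdbc connection params.
// The keys and paths are example values, not taken from the project.
public class KerberosParamsSketch {

    public static void main(String[] args) throws Exception {
        Map<String, String> params = new LinkedHashMap<>();
        params.put("principal", "hive/node1@EXAMPLE.COM");
        params.put("java.security.krb5.conf", "/etc/krb5.conf");
        params.put("login.user.keytab.username", "hive@EXAMPLE.COM");
        params.put("login.user.keytab.path", "/opt/keytabs/hive.keytab");

        String json = new ObjectMapper().writeValueAsString(params);
        System.out.println(json); // {"principal":"hive/node1@EXAMPLE.COM",...}
    }
}
```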