[Improvement][WorkerGroup] Optimize and reduce database and ZooKeeper queries and replace worker address input with multiple-select (#5182)

* [Improvement][Server] Optimize and reduce database and ZooKeeper queries

* [Improvement][WorkerGroup] Replace worker address input with multiple-select

* [Improvement][WorkerGroup] Fix unit test

* [Improvement][API&UI] Optimize long host to short host and long host display in k8s
Shiwen Cheng 2021-04-14 21:39:39 +08:00 committed by GitHub
parent cc7a4446f5
commit 1c1f6663a6
35 changed files with 421 additions and 300 deletions
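
The first commit item removes the per-dispatch database and ZooKeeper lookups from the master: worker group membership and worker heartbeat data are now held in memory by the new ServerNodeManager, kept fresh by ZooKeeper listeners plus a scheduled database sync, and the host managers only read that cache. Below is a minimal sketch of the pattern, not the actual ServerNodeManager; the fetch helper is a hypothetical stand-in for the real registry and worker-group-table queries.

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Minimal sketch of the caching pattern introduced by this commit: dispatch-time
 * reads come from an in-memory map that a background task refreshes, instead of
 * querying the database or ZooKeeper on every task dispatch.
 */
public class NodeCacheSketch {

    private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>();

    private final ScheduledExecutorService executorService =
            Executors.newSingleThreadScheduledExecutor();

    public void start() {
        // 10-second refresh mirrors the WorkerNodeInfoAndGroupDbSyncTask interval in the diff below.
        executorService.scheduleWithFixedDelay(this::refresh, 0, 10, TimeUnit.SECONDS);
    }

    private void refresh() {
        // Hypothetical stand-in for the real ZooKeeper listeners and
        // t_ds_worker_group table sync performed by ServerNodeManager.
        Map<String, Set<String>> latest = fetchGroupsFromRegistryAndDatabase();
        workerGroupNodes.clear();
        workerGroupNodes.putAll(latest);
    }

    /** Dispatch-time read: no database or ZooKeeper round trip. */
    public Set<String> getWorkerGroupNodes(String workerGroup) {
        return workerGroupNodes.getOrDefault(workerGroup.toLowerCase(), Collections.emptySet());
    }

    private Map<String, Set<String>> fetchGroupsFromRegistryAndDatabase() {
        return Collections.emptyMap(); // placeholder only
    }

    public void stop() {
        executorService.shutdownNow();
    }
}

The real ServerNodeManager additionally guards its maps with locks and reacts to worker add/remove events pushed by ZooKeeper watchers, as its diff further down shows.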

View File

@ -14,6 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api;
import org.springframework.boot.SpringApplication;
@ -25,15 +26,12 @@ import org.springframework.context.annotation.FilterType;
@SpringBootApplication
@ServletComponentScan
@ComponentScan(basePackages = {"org.apache.dolphinscheduler"},
excludeFilters = @ComponentScan.Filter(type = FilterType.REGEX,
pattern = "org.apache.dolphinscheduler.server.*"))
@ComponentScan(value = "org.apache.dolphinscheduler",
excludeFilters = @ComponentScan.Filter(type = FilterType.REGEX, pattern = "org.apache.dolphinscheduler.server.*"))
public class ApiApplicationServer extends SpringBootServletInitializer {
public static void main(String[] args) {
SpringApplication.run(ApiApplicationServer.class, args);
}
public static void main(String[] args) {
SpringApplication.run(ApiApplicationServer.class, args);
}
}

View File

@ -18,6 +18,7 @@
package org.apache.dolphinscheduler.api.controller;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_WORKER_GROUP_FAIL;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_WORKER_ADDRESS_LIST_FAIL;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_WORKER_GROUP_FAIL;
import static org.apache.dolphinscheduler.api.enums.Status.SAVE_ERROR;
@ -159,4 +160,20 @@ public class WorkerGroupController extends BaseController {
return returnDataList(result);
}
/**
* query worker address list
*
* @param loginUser login user
* @return all worker address list
*/
@ApiOperation(value = "queryWorkerAddressList", notes = "QUERY_WORKER_ADDRESS_LIST_NOTES")
@GetMapping(value = "/worker-address-list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_WORKER_ADDRESS_LIST_FAIL)
public Result queryWorkerAddressList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
logger.info("query worker address list: login user {}", RegexUtils.escapeNRT(loginUser.getUserName()));
Map<String, Object> result = workerGroupService.getWorkerAddressList();
return returnDataList(result);
}
}
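
The new endpoint returns the live worker addresses that populate the multi-select on the worker group form. A hedged usage sketch with Java 11's HttpClient follows; the base URL and sessionId header are placeholders (how the session is passed depends on the deployment), and the commented response shape is illustrative only.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class WorkerAddressListClient {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // Base URL and sessionId value are placeholders for illustration only.
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:12345/dolphinscheduler/worker-group/worker-address-list"))
                .header("sessionId", "<your-session-id>")
                .GET()
                .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // Illustrative response shape: {"code":0,"msg":"success","data":["192.168.1.1:1234", ...]}
        System.out.println(response.body());
    }
}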

View File

@ -209,6 +209,7 @@ public enum Status {
CREATE_WORKER_GROUP_FORBIDDEN_IN_DOCKER(10175, "create worker group forbidden in docker ", "创建worker分组在docker中禁止"),
DELETE_WORKER_GROUP_FORBIDDEN_IN_DOCKER(10176, "delete worker group forbidden in docker ", "删除worker分组在docker中禁止"),
WORKER_ADDRESS_INVALID(10177, "worker address {0} invalid", "worker地址[{0}]无效"),
QUERY_WORKER_ADDRESS_LIST_FAIL(10178, "query worker address list fail ", "查询worker地址列表失败"),
UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"),
UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"),

View File

@ -62,4 +62,11 @@ public interface WorkerGroupService {
*/
Map<String, Object> deleteWorkerGroupById(User loginUser, Integer id);
/**
* query all worker address list
*
* @return all worker address list
*/
Map<String, Object> getWorkerAddressList();
}

View File

@ -81,10 +81,6 @@ public class WorkerGroupServiceImpl extends BaseServiceImpl implements WorkerGro
if (isNotAdmin(loginUser, result)) {
return result;
}
if (Constants.DOCKER_MODE && !Constants.KUBERNETES_MODE) {
putMsg(result, Status.CREATE_WORKER_GROUP_FORBIDDEN_IN_DOCKER);
return result;
}
if (StringUtils.isEmpty(name)) {
putMsg(result, Status.NAME_NULL);
return result;
@ -303,10 +299,6 @@ public class WorkerGroupServiceImpl extends BaseServiceImpl implements WorkerGro
if (isNotAdmin(loginUser, result)) {
return result;
}
if (Constants.DOCKER_MODE && !Constants.KUBERNETES_MODE) {
putMsg(result, Status.DELETE_WORKER_GROUP_FORBIDDEN_IN_DOCKER);
return result;
}
WorkerGroup workerGroup = workerGroupMapper.selectById(id);
if (workerGroup == null) {
putMsg(result, Status.DELETE_WORKER_GROUP_NOT_EXIST);
@ -323,4 +315,18 @@ public class WorkerGroupServiceImpl extends BaseServiceImpl implements WorkerGro
return result;
}
/**
* query all worker address list
*
* @return all worker address list
*/
@Override
public Map<String, Object> getWorkerAddressList() {
Map<String, Object> result = new HashMap<>();
List<String> serverNodeList = zookeeperMonitor.getServerNodeList(ZKNodeType.WORKER, true);
result.put(Constants.DATA_LIST, serverNodeList);
putMsg(result, Status.SUCCESS);
return result;
}
}

View File

@ -86,6 +86,9 @@ public class NetUtils {
if (STS_PATTERN.matcher(host).find()) {
return String.format("%s.%s", host, host.replaceFirst("\\d+$", "headless"));
}
} else if (canonicalHost.contains(".")) {
String[] items = canonicalHost.split("\\.");
return String.format("%s.%s", items[0], items[1]);
}
return canonicalHost;
}
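
For reference, the change above keeps only the first two DNS labels of a Kubernetes canonical host, so the UI shows the pod and headless-service names instead of the full FQDN. A standalone recreation of that truncation, using the same sample value as the updated NetUtilsTest:

/**
 * Standalone recreation of the Kubernetes host shortening added to NetUtils above:
 * keep only the pod label and the headless-service label of the canonical host name.
 */
public class ShortHostSketch {

    static String shorten(String canonicalHost) {
        if (canonicalHost.contains(".")) {
            String[] items = canonicalHost.split("\\.");
            if (items.length >= 2) { // extra guard, not present in the diff above
                return String.format("%s.%s", items[0], items[1]);
            }
        }
        return canonicalHost;
    }

    public static void main(String[] args) {
        // Prints: dolphinscheduler-worker-0.dolphinscheduler-worker-headless
        System.out.println(shorten(
                "dolphinscheduler-worker-0.dolphinscheduler-worker-headless.default.svc.cluster.local"));
    }
}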

View File

@ -50,7 +50,7 @@ public class NetUtilsTest {
when(address.getHostAddress()).thenReturn("172.17.0.15");
assertEquals("172.17.0.15", NetUtils.getHost(address));
CommonTest.setFinalStatic(Constants.class.getDeclaredField("KUBERNETES_MODE"), true);
assertEquals("dolphinscheduler-worker-0.dolphinscheduler-worker-headless.default.svc.cluster.local", NetUtils.getHost(address));
assertEquals("dolphinscheduler-worker-0.dolphinscheduler-worker-headless", NetUtils.getHost(address));
address = mock(InetAddress.class);
when(address.getCanonicalHostName()).thenReturn("dolphinscheduler-worker-0");
when(address.getHostName()).thenReturn("dolphinscheduler-worker-0");

View File

@ -28,7 +28,6 @@ import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor;
import org.apache.dolphinscheduler.server.master.processor.TaskKillResponseProcessor;
import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor;
import org.apache.dolphinscheduler.server.master.runner.MasterSchedulerService;
import org.apache.dolphinscheduler.server.worker.WorkerServer;
import org.apache.dolphinscheduler.server.zk.ZKMasterClient;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.quartz.QuartzExecutors;
@ -45,8 +44,15 @@ import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.transaction.annotation.EnableTransactionManagement;
/**
* master server
*/
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.ASSIGNABLE_TYPE, classes = {WorkerServer.class})
@ComponentScan.Filter(type = FilterType.REGEX, pattern = {
"org.apache.dolphinscheduler.server.worker.*",
"org.apache.dolphinscheduler.server.monitor.*",
"org.apache.dolphinscheduler.server.log.*"
})
})
@EnableTransactionManagement
public class MasterServer implements IStoppable {

View File

@ -17,8 +17,6 @@
package org.apache.dolphinscheduler.server.master.dispatch.executor;
import org.apache.commons.collections.CollectionUtils;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.command.Command;
@ -31,17 +29,22 @@ import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteExce
import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor;
import org.apache.dolphinscheduler.server.master.processor.TaskKillResponseProcessor;
import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ServerNodeManager;
import org.apache.commons.collections.CollectionUtils;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import javax.annotation.PostConstruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import javax.annotation.PostConstruct;
import java.util.*;
/**
* netty executor manager
*/
@ -51,10 +54,10 @@ public class NettyExecutorManager extends AbstractExecutorManager<Boolean>{
private final Logger logger = LoggerFactory.getLogger(NettyExecutorManager.class);
/**
* zookeeper node manager
* server node manager
*/
@Autowired
private ZookeeperNodeManager zookeeperNodeManager;
private ServerNodeManager serverNodeManager;
/**
* netty remote client
@ -182,7 +185,7 @@ public class NettyExecutorManager extends AbstractExecutorManager<Boolean>{
ExecutorType executorType = context.getExecutorType();
switch (executorType){
case WORKER:
nodes = zookeeperNodeManager.getWorkerGroupNodes(context.getWorkerGroup());
nodes = serverNodeManager.getWorkerGroupNodes(context.getWorkerGroup());
break;
case CLIENT:
break;

View File

@ -18,55 +18,32 @@
package org.apache.dolphinscheduler.server.master.dispatch.host;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.ResInfo;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.HostWorker;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.zk.ZKMasterClient;
import org.apache.dolphinscheduler.server.registry.ServerNodeManager;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.springframework.beans.factory.annotation.Autowired;
/**
* round robin host manager
* common host manager
*/
public abstract class CommonHostManager implements HostManager {
/**
* zookeeper registry center
* server node manager
*/
@Autowired
protected ZookeeperRegistryCenter registryCenter;
/**
* zookeeper node manager
*/
@Autowired
protected ZookeeperNodeManager zookeeperNodeManager;
/**
* zk master client
*/
@Autowired
protected ZKMasterClient zkMasterClient;
/**
* worker group mapper
*/
@Autowired
protected WorkerGroupMapper workerGroupMapper;
protected ServerNodeManager serverNodeManager;
/**
* select host
@ -80,10 +57,7 @@ public abstract class CommonHostManager implements HostManager {
ExecutorType executorType = context.getExecutorType();
switch (executorType) {
case WORKER:
candidates = getHostWorkersFromDatabase(workerGroup);
if (candidates.isEmpty()) {
candidates = getHostWorkersFromZookeeper(workerGroup);
}
candidates = getWorkerCandidates(workerGroup);
break;
case CLIENT:
break;
@ -99,31 +73,12 @@ public abstract class CommonHostManager implements HostManager {
protected abstract HostWorker select(Collection<HostWorker> nodes);
protected List<HostWorker> getHostWorkersFromDatabase(String workerGroup) {
protected List<HostWorker> getWorkerCandidates(String workerGroup) {
List<HostWorker> hostWorkers = new ArrayList<>();
List<WorkerGroup> workerGroups = workerGroupMapper.queryWorkerGroupByName(workerGroup);
if (CollectionUtils.isNotEmpty(workerGroups)) {
Map<String, String> serverMaps = zkMasterClient.getServerMaps(ZKNodeType.WORKER, true);
for (WorkerGroup wg : workerGroups) {
for (String addr : wg.getAddrList().split(Constants.COMMA)) {
if (serverMaps.containsKey(addr)) {
String heartbeat = serverMaps.get(addr);
int hostWeight = getWorkerHostWeightFromHeartbeat(heartbeat);
hostWorkers.add(HostWorker.of(addr, hostWeight, workerGroup));
}
}
}
}
return hostWorkers;
}
protected List<HostWorker> getHostWorkersFromZookeeper(String workerGroup) {
List<HostWorker> hostWorkers = new ArrayList<>();
Collection<String> nodes = zookeeperNodeManager.getWorkerGroupNodes(workerGroup);
Set<String> nodes = serverNodeManager.getWorkerGroupNodes(workerGroup);
if (CollectionUtils.isNotEmpty(nodes)) {
for (String node : nodes) {
String workerGroupPath = registryCenter.getWorkerGroupPath(workerGroup);
String heartbeat = registryCenter.getRegisterOperator().get(workerGroupPath + "/" + node);
String heartbeat = serverNodeManager.getWorkerNodeInfo(node);
int hostWeight = getWorkerHostWeightFromHeartbeat(heartbeat);
hostWorkers.add(HostWorker.of(node, hostWeight, workerGroup));
}
@ -142,12 +97,4 @@ public abstract class CommonHostManager implements HostManager {
return hostWeight;
}
public void setZookeeperNodeManager(ZookeeperNodeManager zookeeperNodeManager) {
this.zookeeperNodeManager = zookeeperNodeManager;
}
public ZookeeperNodeManager getZookeeperNodeManager() {
return zookeeperNodeManager;
}
}

View File

@ -17,7 +17,6 @@
package org.apache.dolphinscheduler.server.master.dispatch.host;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;

View File

@ -18,11 +18,9 @@
package org.apache.dolphinscheduler.server.master.dispatch.host;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.ResInfo;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
@ -30,11 +28,9 @@ import org.apache.dolphinscheduler.server.master.dispatch.host.assign.HostWeight
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.HostWorker;
import org.apache.dolphinscheduler.server.master.dispatch.host.assign.LowerWeightRoundRobin;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@ -51,17 +47,12 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* round robin host manager
* lower weight host manager
*/
public class LowerWeightHostManager extends CommonHostManager {
private final Logger logger = LoggerFactory.getLogger(LowerWeightHostManager.class);
/**
* round robin host manager
*/
private RoundRobinHostManager roundRobinHostManager;
/**
* selector
*/
@ -89,8 +80,6 @@ public class LowerWeightHostManager extends CommonHostManager {
this.lock = new ReentrantLock();
this.executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("LowerWeightHostManagerExecutor"));
this.executorService.scheduleWithFixedDelay(new RefreshResourceTask(),0, 5, TimeUnit.SECONDS);
this.roundRobinHostManager = new RoundRobinHostManager();
this.roundRobinHostManager.setZookeeperNodeManager(getZookeeperNodeManager());
}
@PreDestroy
@ -142,42 +131,21 @@ public class LowerWeightHostManager extends CommonHostManager {
public void run() {
try {
Map<String, Set<HostWeight>> workerHostWeights = new HashMap<>();
// from database
List<WorkerGroup> workerGroups = workerGroupMapper.queryAllWorkerGroup();
if (CollectionUtils.isNotEmpty(workerGroups)) {
Map<String, String> serverMaps = zkMasterClient.getServerMaps(ZKNodeType.WORKER, true);
for (WorkerGroup wg : workerGroups) {
String workerGroup = wg.getName();
List<String> addrs = Arrays.asList(wg.getAddrList().split(Constants.COMMA));
Set<HostWeight> hostWeights = new HashSet<>(addrs.size());
for (String addr : addrs) {
if (serverMaps.containsKey(addr)) {
String heartbeat = serverMaps.get(addr);
HostWeight hostWeight = getHostWeight(addr, workerGroup, heartbeat);
if (hostWeight != null) {
hostWeights.add(hostWeight);
}
}
}
workerHostWeights.put(workerGroup, hostWeights);
}
}
// from zookeeper
Map<String, Set<String>> workerGroupNodes = zookeeperNodeManager.getWorkerGroupNodes();
Set<Map.Entry<String, Set<String>>> entries = workerGroupNodes.entrySet();
for (Map.Entry<String, Set<String>> entry : entries) {
Map<String, Set<String>> workerGroupNodes = serverNodeManager.getWorkerGroupNodes();
for (Map.Entry<String, Set<String>> entry : workerGroupNodes.entrySet()) {
String workerGroup = entry.getKey();
Set<String> nodes = entry.getValue();
String workerGroupPath = registryCenter.getWorkerGroupPath(workerGroup);
Set<HostWeight> hostWeights = new HashSet<>(nodes.size());
for (String node : nodes) {
String heartbeat = registryCenter.getRegisterOperator().get(workerGroupPath + "/" + node);
String heartbeat = serverNodeManager.getWorkerNodeInfo(node);
HostWeight hostWeight = getHostWeight(node, workerGroup, heartbeat);
if (hostWeight != null) {
hostWeights.add(hostWeight);
}
}
workerHostWeights.put(workerGroup, hostWeights);
if (!hostWeights.isEmpty()) {
workerHostWeights.put(workerGroup, hostWeights);
}
}
syncWorkerHostWeight(workerHostWeights);
} catch (Throwable ex) {

View File

@ -23,7 +23,7 @@ import org.apache.dolphinscheduler.server.master.dispatch.host.assign.RandomSele
import java.util.Collection;
/**
* round robin host manager
* random host manager
*/
public class RandomHostManager extends CommonHostManager {

View File

@ -22,7 +22,6 @@ import org.apache.dolphinscheduler.server.master.dispatch.host.assign.RoundRobin
import java.util.Collection;
/**
* round robin host manager
*/
@ -36,7 +35,7 @@ public class RoundRobinHostManager extends CommonHostManager {
/**
* set round robin
*/
public RoundRobinHostManager(){
public RoundRobinHostManager() {
this.selector = new RoundRobinSelector();
}

View File

@ -17,63 +17,103 @@
package org.apache.dolphinscheduler.server.registry;
import org.apache.commons.collections.CollectionUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.service.zk.AbstractListener;
import org.apache.dolphinscheduler.service.zk.AbstractZKClient;
import org.apache.commons.collections.CollectionUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.annotation.PreDestroy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
/**
* zookeeper node manager
* server node manager
*/
@Service
public class ZookeeperNodeManager implements InitializingBean {
public class ServerNodeManager implements InitializingBean {
private final Logger logger = LoggerFactory.getLogger(ZookeeperNodeManager.class);
private final Logger logger = LoggerFactory.getLogger(ServerNodeManager.class);
/**
* master lock
* master lock
*/
private final Lock masterLock = new ReentrantLock();
/**
* worker group lock
* worker group lock
*/
private final Lock workerGroupLock = new ReentrantLock();
/**
* worker group nodes
* worker node info lock
*/
private final Lock workerNodeInfoLock = new ReentrantLock();
/**
* worker group nodes
*/
private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>();
/**
* master nodes
* master nodes
*/
private final Set<String> masterNodes = new HashSet<>();
/**
* worker node info
*/
private final Map<String, String> workerNodeInfo = new HashMap<>();
/**
* executor service
*/
private ScheduledExecutorService executorService;
/**
* zk client
*/
@Autowired
private ZKClient zkClient;
/**
* zookeeper registry center
*/
@Autowired
private ZookeeperRegistryCenter registryCenter;
/**
* worker group mapper
*/
@Autowired
private WorkerGroupMapper workerGroupMapper;
/**
* alert dao
*/
@ -87,9 +127,14 @@ public class ZookeeperNodeManager implements InitializingBean {
@Override
public void afterPropertiesSet() throws Exception {
/**
* load nodes from zookeeper
* load nodes from zookeeper
*/
load();
/**
* init executor service
*/
executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ServerNodeManagerExecutor"));
executorService.scheduleWithFixedDelay(new WorkerNodeInfoAndGroupDbSyncTask(), 0, 10, TimeUnit.SECONDS);
/**
* init MasterNodeListener listener
*/
@ -103,22 +148,59 @@ public class ZookeeperNodeManager implements InitializingBean {
/**
* load nodes from zookeeper
*/
private void load(){
private void load() {
/**
* master nodes from zookeeper
*/
Set<String> masterNodes = registryCenter.getMasterNodesDirectly();
syncMasterNodes(masterNodes);
Set<String> initMasterNodes = registryCenter.getMasterNodesDirectly();
syncMasterNodes(initMasterNodes);
/**
* worker group nodes from zookeeper
*/
Set<String> workerGroups = registryCenter.getWorkerGroupDirectly();
for(String workerGroup : workerGroups){
for (String workerGroup : workerGroups) {
syncWorkerGroupNodes(workerGroup, registryCenter.getWorkerGroupNodesDirectly(workerGroup));
}
}
/**
* zookeeper client
*/
@Component
static class ZKClient extends AbstractZKClient {}
/**
* worker node info and worker group db sync task
*/
class WorkerNodeInfoAndGroupDbSyncTask implements Runnable {
@Override
public void run() {
// sync worker node info
Map<String, String> newWorkerNodeInfo = zkClient.getServerMaps(ZKNodeType.WORKER, true);
syncWorkerNodeInfo(newWorkerNodeInfo);
// sync worker group nodes from database
List<WorkerGroup> workerGroupList = workerGroupMapper.queryAllWorkerGroup();
if (CollectionUtils.isNotEmpty(workerGroupList)) {
for (WorkerGroup wg : workerGroupList) {
String workerGroup = wg.getName();
Set<String> nodes = new HashSet<>();
String[] addrs = wg.getAddrList().split(Constants.COMMA);
for (String addr : addrs) {
if (newWorkerNodeInfo.containsKey(addr)) {
nodes.add(addr);
}
}
if (!nodes.isEmpty()) {
syncWorkerGroupNodes(workerGroup, nodes);
}
}
}
}
}
/**
* worker group node listener
*/
@ -126,7 +208,7 @@ public class ZookeeperNodeManager implements InitializingBean {
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
if(registryCenter.isWorkerPath(path)){
if (registryCenter.isWorkerPath(path)) {
try {
if (event.getType() == TreeCacheEvent.Type.NODE_ADDED) {
logger.info("worker group node : {} added.", path);
@ -141,25 +223,23 @@ public class ZookeeperNodeManager implements InitializingBean {
syncWorkerGroupNodes(group, currentNodes);
alertDao.sendServerStopedAlert(1, path, "WORKER");
}
} catch (IllegalArgumentException ignore) {
logger.warn(ignore.getMessage());
} catch (IllegalArgumentException ex) {
logger.warn(ex.getMessage());
} catch (Exception ex) {
logger.error("WorkerGroupListener capture data change and get data failed", ex);
}
}
}
private String parseGroup(String path){
String[] parts = path.split("\\/");
private String parseGroup(String path) {
String[] parts = path.split("/");
if (parts.length < 6) {
throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path));
}
String group = parts[parts.length - 2];
return group;
return parts[parts.length - 2];
}
}
/**
* master node listener
*/
@ -203,7 +283,7 @@ public class ZookeeperNodeManager implements InitializingBean {
* sync master nodes
* @param nodes master nodes
*/
private void syncMasterNodes(Set<String> nodes){
private void syncMasterNodes(Set<String> nodes) {
masterLock.lock();
try {
masterNodes.clear();
@ -218,7 +298,7 @@ public class ZookeeperNodeManager implements InitializingBean {
* @param workerGroup worker group
* @param nodes worker nodes
*/
private void syncWorkerGroupNodes(String workerGroup, Set<String> nodes){
private void syncWorkerGroupNodes(String workerGroup, Set<String> nodes) {
workerGroupLock.lock();
try {
workerGroup = workerGroup.toLowerCase();
@ -231,7 +311,7 @@ public class ZookeeperNodeManager implements InitializingBean {
}
}
public Map<String, Set<String>> getWorkerGroupNodes(){
public Map<String, Set<String>> getWorkerGroupNodes() {
return Collections.unmodifiableMap(workerGroupNodes);
}
@ -240,15 +320,15 @@ public class ZookeeperNodeManager implements InitializingBean {
* @param workerGroup workerGroup
* @return worker nodes
*/
public Set<String> getWorkerGroupNodes(String workerGroup){
public Set<String> getWorkerGroupNodes(String workerGroup) {
workerGroupLock.lock();
try {
if(StringUtils.isEmpty(workerGroup)){
workerGroup = DEFAULT_WORKER_GROUP;
if (StringUtils.isEmpty(workerGroup)) {
workerGroup = Constants.DEFAULT_WORKER_GROUP;
}
workerGroup = workerGroup.toLowerCase();
Set<String> nodes = workerGroupNodes.get(workerGroup);
if(CollectionUtils.isNotEmpty(nodes)){
if (CollectionUtils.isNotEmpty(nodes)) {
return Collections.unmodifiableSet(nodes);
}
return nodes;
@ -258,9 +338,48 @@ public class ZookeeperNodeManager implements InitializingBean {
}
/**
* close
* get worker node info
* @return worker node info
*/
public void close(){
public Map<String, String> getWorkerNodeInfo() {
return Collections.unmodifiableMap(workerNodeInfo);
}
/**
* get worker node info
* @param workerNode worker node
* @return worker node info
*/
public String getWorkerNodeInfo(String workerNode) {
workerNodeInfoLock.lock();
try {
return workerNodeInfo.getOrDefault(workerNode, null);
} finally {
workerNodeInfoLock.unlock();
}
}
/**
* sync worker node info
* @param newWorkerNodeInfo new worker node info
*/
private void syncWorkerNodeInfo(Map<String, String> newWorkerNodeInfo) {
workerNodeInfoLock.lock();
try {
workerNodeInfo.clear();
workerNodeInfo.putAll(newWorkerNodeInfo);
} finally {
workerNodeInfoLock.unlock();
}
}
/**
* destroy
*/
@PreDestroy
public void destroy() {
executorService.shutdownNow();
registryCenter.close();
}
}

View File

@ -45,12 +45,21 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.FilterType;
import org.springframework.transaction.annotation.EnableTransactionManagement;
/**
* worker server
*/
@ComponentScan("org.apache.dolphinscheduler")
@ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = {
@ComponentScan.Filter(type = FilterType.REGEX, pattern = {
"org.apache.dolphinscheduler.server.master.*",
"org.apache.dolphinscheduler.server.monitor.*",
"org.apache.dolphinscheduler.server.log.*",
"org.apache.dolphinscheduler.server.zk.ZKMasterClient",
"org.apache.dolphinscheduler.server.registry.ServerNodeManager"
})
})
@EnableTransactionManagement
public class WorkerServer implements IStoppable {

View File

@ -39,7 +39,7 @@ import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.registry.MasterRegistry;
import org.apache.dolphinscheduler.server.registry.DependencyConfig;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ServerNodeManager;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.zk.SpringZKServer;
import org.apache.dolphinscheduler.server.zk.ZKMasterClient;
@ -70,7 +70,7 @@ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes = {DependencyConfig.class, SpringApplicationContext.class, SpringZKServer.class, CuratorZookeeperClient.class,
NettyExecutorManager.class, ExecutorDispatcher.class, ZookeeperRegistryCenter.class, ZKMasterClient.class, TaskPriorityQueueConsumer.class,
ZookeeperNodeManager.class, RegisterOperator.class, ZookeeperConfig.class, MasterConfig.class, MasterRegistry.class,
ServerNodeManager.class, RegisterOperator.class, ZookeeperConfig.class, MasterConfig.class, MasterRegistry.class,
CuratorZookeeperClient.class, SpringConnectionFactory.class})
public class TaskPriorityQueueConsumerTest {

View File

@ -14,16 +14,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.dispatch;
import org.apache.dolphinscheduler.dao.datasource.SpringConnectionFactory;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.config.NettyServerConfig;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.registry.DependencyConfig;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ServerNodeManager;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.utils.ExecutionContextTestUtils;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
@ -34,6 +35,7 @@ import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.zk.CuratorZookeeperClient;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
@ -47,7 +49,8 @@ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes={DependencyConfig.class, SpringApplicationContext.class, SpringZKServer.class, WorkerRegistry.class,
NettyExecutorManager.class, ExecutorDispatcher.class, ZookeeperRegistryCenter.class, WorkerConfig.class,
ZookeeperNodeManager.class, ZookeeperCachedOperator.class, ZookeeperConfig.class, CuratorZookeeperClient.class})
ServerNodeManager.class, ZookeeperCachedOperator.class, ZookeeperConfig.class, CuratorZookeeperClient.class,
SpringConnectionFactory.class})
public class ExecutorDispatcherTest {
@Autowired

View File

@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.server.master.dispatch.executor;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.datasource.SpringConnectionFactory;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
@ -30,7 +31,7 @@ import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionConte
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException;
import org.apache.dolphinscheduler.server.registry.DependencyConfig;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ServerNodeManager;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.processor.TaskExecuteProcessor;
@ -40,6 +41,7 @@ import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.zk.CuratorZookeeperClient;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -53,8 +55,9 @@ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
*/
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes={DependencyConfig.class, SpringZKServer.class, WorkerRegistry.class,
ZookeeperNodeManager.class, ZookeeperRegistryCenter.class, WorkerConfig.class, CuratorZookeeperClient.class,
ZookeeperCachedOperator.class, ZookeeperConfig.class, SpringApplicationContext.class, NettyExecutorManager.class})
ServerNodeManager.class, ZookeeperRegistryCenter.class, WorkerConfig.class, CuratorZookeeperClient.class,
ZookeeperCachedOperator.class, ZookeeperConfig.class, SpringApplicationContext.class, NettyExecutorManager.class,
SpringConnectionFactory.class})
public class NettyExecutorManagerTest {
@Autowired

View File

@ -17,12 +17,10 @@
package org.apache.dolphinscheduler.server.master.dispatch.host;
import com.google.common.collect.Sets;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ServerNodeManager;
import org.apache.dolphinscheduler.server.utils.ExecutionContextTestUtils;
import org.junit.Assert;
@ -33,6 +31,8 @@ import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import com.google.common.collect.Sets;
/**
* round robin host manager test
@ -40,16 +40,15 @@ import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class RoundRobinHostManagerTest {
@Mock
private ZookeeperNodeManager zookeeperNodeManager;
private ServerNodeManager serverNodeManager;
@InjectMocks
RoundRobinHostManager roundRobinHostManager;
@Test
public void testSelectWithEmptyResult() {
Mockito.when(zookeeperNodeManager.getWorkerGroupNodes("default")).thenReturn(null);
Mockito.when(serverNodeManager.getWorkerGroupNodes("default")).thenReturn(null);
ExecutionContext context = ExecutionContextTestUtils.getExecutionContext(10000);
Host emptyHost = roundRobinHostManager.select(context);
Assert.assertTrue(StringUtils.isEmpty(emptyHost.getAddress()));
@ -57,7 +56,7 @@ public class RoundRobinHostManagerTest {
@Test
public void testSelectWithResult() {
Mockito.when(zookeeperNodeManager.getWorkerGroupNodes("default")).thenReturn(Sets.newHashSet("192.168.1.1:22:100"));
Mockito.when(serverNodeManager.getWorkerGroupNodes("default")).thenReturn(Sets.newHashSet("192.168.1.1:22"));
ExecutionContext context = ExecutionContextTestUtils.getExecutionContext(10000);
Host host = roundRobinHostManager.select(context);
Assert.assertTrue(StringUtils.isNotEmpty(host.getAddress()));

View File

@ -14,14 +14,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.registry;
import java.util.Map;
import java.util.Set;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.datasource.SpringConnectionFactory;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.registry.MasterRegistry;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
@ -30,6 +28,10 @@ import org.apache.dolphinscheduler.server.zk.SpringZKServer;
import org.apache.dolphinscheduler.service.zk.CuratorZookeeperClient;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
import java.util.Map;
import java.util.Set;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -38,16 +40,16 @@ import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
/**
* zookeeper node manager test
* server node manager test
*/
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes={DependencyConfig.class, SpringZKServer.class, MasterRegistry.class,WorkerRegistry.class,
ZookeeperRegistryCenter.class, MasterConfig.class, WorkerConfig.class,
ZookeeperCachedOperator.class, ZookeeperConfig.class, ZookeeperNodeManager.class, CuratorZookeeperClient.class})
public class ZookeeperNodeManagerTest {
ZookeeperRegistryCenter.class, MasterConfig.class, WorkerConfig.class, SpringConnectionFactory.class,
ZookeeperCachedOperator.class, ZookeeperConfig.class, ServerNodeManager.class, CuratorZookeeperClient.class})
public class ServerNodeManagerTest {
@Autowired
private ZookeeperNodeManager zookeeperNodeManager;
private ServerNodeManager serverNodeManager;
@Autowired
private MasterRegistry masterRegistry;
@ -68,11 +70,11 @@ public class ZookeeperNodeManagerTest {
public void testGetMasterNodes(){
masterRegistry.registry();
try {
//let the zookeeperNodeManager catch the registry event
//let the serverNodeManager catch the registry event
Thread.sleep(2000);
} catch (InterruptedException ignore) {
}
Set<String> masterNodes = zookeeperNodeManager.getMasterNodes();
Set<String> masterNodes = serverNodeManager.getMasterNodes();
Assert.assertTrue(CollectionUtils.isNotEmpty(masterNodes));
Assert.assertEquals(1, masterNodes.size());
Assert.assertEquals(NetUtils.getAddr(masterConfig.getListenPort()), masterNodes.iterator().next());
@ -83,11 +85,11 @@ public class ZookeeperNodeManagerTest {
public void testGetWorkerGroupNodes(){
workerRegistry.registry();
try {
//let the zookeeperNodeManager catch the registry event
//let the serverNodeManager catch the registry event
Thread.sleep(2000);
} catch (InterruptedException ignore) {
}
Map<String, Set<String>> workerGroupNodes = zookeeperNodeManager.getWorkerGroupNodes();
Map<String, Set<String>> workerGroupNodes = serverNodeManager.getWorkerGroupNodes();
Assert.assertEquals(1, workerGroupNodes.size());
Assert.assertEquals("default".trim(), workerGroupNodes.keySet().iterator().next());
workerRegistry.unRegistry();
@ -97,12 +99,11 @@ public class ZookeeperNodeManagerTest {
public void testGetWorkerGroupNodesWithParam(){
workerRegistry.registry();
try {
//let the zookeeperNodeManager catch the registry event
//let the serverNodeManager catch the registry event
Thread.sleep(3000);
} catch (InterruptedException ignore) {
}
Map<String, Set<String>> workerGroupNodes = zookeeperNodeManager.getWorkerGroupNodes();
Set<String> workerNodes = zookeeperNodeManager.getWorkerGroupNodes("default");
Set<String> workerNodes = serverNodeManager.getWorkerGroupNodes("default");
Assert.assertTrue(CollectionUtils.isNotEmpty(workerNodes));
Assert.assertEquals(1, workerNodes.size());
Assert.assertEquals(NetUtils.getAddr(workerConfig.getListenPort()), workerNodes.iterator().next());

View File

@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.server.worker.processor;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.datasource.SpringConnectionFactory;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.CommandType;
@ -34,7 +35,7 @@ import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor;
import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor;
import org.apache.dolphinscheduler.server.master.processor.queue.TaskResponseService;
import org.apache.dolphinscheduler.server.master.registry.MasterRegistry;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ServerNodeManager;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
@ -63,25 +64,12 @@ import io.netty.channel.Channel;
*/
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes = {
TaskCallbackServiceTestConfig.class,
SpringZKServer.class,
SpringApplicationContext.class,
MasterRegistry.class,
WorkerRegistry.class,
ZookeeperRegistryCenter.class,
MasterConfig.class,
WorkerConfig.class,
RegisterOperator.class,
ZookeeperConfig.class,
ZookeeperNodeManager.class,
TaskCallbackService.class,
TaskResponseService.class,
TaskAckProcessor.class,
TaskResponseProcessor.class,
TaskExecuteProcessor.class,
CuratorZookeeperClient.class,
TaskExecutionContextCacheManagerImpl.class,
WorkerManagerThread.class})
TaskCallbackServiceTestConfig.class, SpringZKServer.class, SpringApplicationContext.class,
SpringConnectionFactory.class, MasterRegistry.class, WorkerRegistry.class, ZookeeperRegistryCenter.class,
MasterConfig.class, WorkerConfig.class, RegisterOperator.class, ZookeeperConfig.class, ServerNodeManager.class,
TaskCallbackService.class, TaskResponseService.class, TaskAckProcessor.class, TaskResponseProcessor.class,
TaskExecuteProcessor.class, CuratorZookeeperClient.class, TaskExecutionContextCacheManagerImpl.class,
WorkerManagerThread.class})
public class TaskCallbackServiceTest {
@Autowired

View File

@ -29,9 +29,12 @@ import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -99,6 +102,28 @@ public abstract class AbstractZKClient extends RegisterOperator {
return serverList;
}
/**
* get server zk nodes.
*
* @param zkNodeType zookeeper node type
* @return result : list<zknode>
*/
public List<String> getServerZkNodes(ZKNodeType zkNodeType) {
String path = getZNodeParentPath(zkNodeType);
List<String> serverList = super.getChildrenKeys(path);
if (zkNodeType == ZKNodeType.WORKER) {
List<String> workerList = new ArrayList<>();
for (String group : serverList) {
List<String> groupServers = super.getChildrenKeys(path + Constants.SLASH + group);
for (String groupServer : groupServers) {
workerList.add(group + Constants.SLASH + groupServer);
}
}
serverList = workerList;
}
return serverList;
}
/**
* get server list map.
*
@ -110,17 +135,7 @@ public abstract class AbstractZKClient extends RegisterOperator {
Map<String, String> serverMap = new HashMap<>();
try {
String path = getZNodeParentPath(zkNodeType);
List<String> serverList = super.getChildrenKeys(path);
if (zkNodeType == ZKNodeType.WORKER) {
List<String> workerList = new ArrayList<>();
for (String group : serverList) {
List<String> groupServers = super.getChildrenKeys(path + Constants.SLASH + group);
for (String groupServer : groupServers) {
workerList.add(group + Constants.SLASH + groupServer);
}
}
serverList = workerList;
}
List<String> serverList = getServerZkNodes(zkNodeType);
for (String server : serverList) {
String host = server;
if (zkNodeType == ZKNodeType.WORKER && hostOnly) {
@ -145,10 +160,48 @@ public abstract class AbstractZKClient extends RegisterOperator {
return getServerMaps(zkNodeType, false);
}
/**
* get server node set.
*
* @param zkNodeType zookeeper node type
* @param hostOnly host only
* @return result : set<host>
*/
public Set<String> getServerNodeSet(ZKNodeType zkNodeType, boolean hostOnly) {
Set<String> serverSet = new HashSet<>();
try {
List<String> serverList = getServerZkNodes(zkNodeType);
for (String server : serverList) {
String host = server;
if (zkNodeType == ZKNodeType.WORKER && hostOnly) {
host = server.split(Constants.SLASH)[1];
}
serverSet.add(host);
}
} catch (Exception e) {
logger.error("get server node set failed", e);
}
return serverSet;
}
/**
* get server node list.
*
* @param zkNodeType zookeeper node type
* @param hostOnly host only
* @return result : list<host>
*/
public List<String> getServerNodeList(ZKNodeType zkNodeType, boolean hostOnly) {
Set<String> serverSet = getServerNodeSet(zkNodeType, hostOnly);
List<String> serverList = new ArrayList<>(serverSet);
Collections.sort(serverList);
return serverList;
}
/**
* check the zookeeper node already exists
*
* @param host host
* @param host host
* @param zkNodeType zookeeper node type
* @return true if exists
*/

View File

@ -64,7 +64,7 @@
<el-table-column prop="runTimes" :label="$t('Run Times')"></el-table-column>
<el-table-column prop="recovery" :label="$t('fault-tolerant sign')"></el-table-column>
<el-table-column prop="executorName" :label="$t('Executor')"></el-table-column>
<el-table-column prop="host" :label="$t('host')" min-width="190"></el-table-column>
<el-table-column prop="host" :label="$t('host')" min-width="210"></el-table-column>
<el-table-column :label="$t('Operation')" width="240" fixed="right">
<template slot-scope="scope">
<div v-show="scope.row.disabled">

View File

@ -59,7 +59,7 @@
</template>
</el-table-column>
<el-table-column prop="retryTimes" :label="$t('Retry Count')"></el-table-column>
<el-table-column :label="$t('host')" min-width="190">
<el-table-column :label="$t('host')" min-width="210">
<template slot-scope="scope">
<span>{{scope.row.host | filterNull}}</span>
</template>

View File

@ -19,7 +19,8 @@
ref="popover"
:ok-text="item ? $t('Edit') : $t('Submit')"
@ok="_ok"
@close="close">
@close="close"
style="width: 700px;">
<template slot="content">
<div class="create-worker-model">
<m-list-box-f>
@ -37,16 +38,7 @@
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('Worker Addresses')}}</template>
<template slot="content">
<el-input
:autosize="{ minRows: 4, maxRows: 6 }"
type="textarea"
size="mini"
v-model.trim="addrList"
:placeholder="$t('Please enter the worker addresses separated by commas')">
</el-input>
<div class="cwm-tip">
<span>{{$t('Note: Multiple worker addresses have been comma separated')}}</span>
</div>
<treeselect :options="this.workerAddressList" v-model="addrList" :multiple="true" :placeholder="$t('Please select the worker addresses')"></treeselect>
</template>
</m-list-box-f>
</div>
@ -58,6 +50,8 @@
import store from '@/conf/home/store'
import mPopover from '@/module/components/popup/popover'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
export default {
name: 'create-warning',
@ -66,11 +60,12 @@
store,
id: 0,
name: '',
addrList: ''
addrList: []
}
},
props: {
item: Object
item: Object,
workerAddressList: Object
},
methods: {
_ok () {
@ -79,35 +74,23 @@
this._submit()
}
},
checkIpAndPorts (addrs) {
let reg = /^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5]):\d{1,5}$/
return addrs.split(',').every(item => reg.test(item))
},
checkFqdnAndPorts (addrs) {
let reg = /^([\w-]+\.)*[\w-]+:\d{1,5}$/i
return addrs.split(',').every(item => reg.test(item))
},
_verification () {
// group name
if (!this.name) {
this.$message.warning(`${i18n.$t('Please enter group name')}`)
return false
}
if (!this.addrList) {
if (!this.addrList.length) {
this.$message.warning(`${i18n.$t('Worker addresses cannot be empty')}`)
return false
}
if (!this.checkIpAndPorts(this.addrList) && !this.checkFqdnAndPorts(this.addrList)) {
this.$message.warning(`${i18n.$t('Please enter the correct worker addresses')}`)
return false
}
return true
},
_submit () {
let param = {
id: this.id,
name: this.name,
addrList: this.addrList
addrList: this.addrList.join(',')
}
if (this.item) {
param.id = this.item.id
@ -131,20 +114,11 @@
if (this.item) {
this.id = this.item.id
this.name = this.item.name
this.addrList = this.item.addrList
this.addrList = this.item.addrList.split(',')
}
},
mounted () {
},
components: { mPopover, mListBoxF }
components: { mPopover, mListBoxF, Treeselect }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.create-worker-model {
.cwm-tip {
color: #999;
padding-top: 4px;
display: block;
}
}
</style>

View File

@ -24,8 +24,8 @@
:title="item ? $t('Edit worker group') : $t('Create worker group')"
v-if="createWorkerGroupDialog"
:visible.sync="createWorkerGroupDialog"
width="auto">
<m-create-worker :item="item" @onUpdate="onUpdate" @close="close"></m-create-worker>
width="50%">
<m-create-worker :item="item" :worker-address-list="workerAddressList" @onUpdate="onUpdate" @close="close"></m-create-worker>
</el-dialog>
</template>
</m-conditions>
@ -77,6 +77,7 @@
total: null,
isLoading: false,
workerGroupList: [],
workerAddressList: [],
searchParams: {
pageSize: 10,
pageNo: 1,
@ -90,7 +91,7 @@
mixins: [listUrlParamHandle],
props: {},
methods: {
...mapActions('security', ['getWorkerGroups']),
...mapActions('security', ['getWorkerGroups', 'getWorkerAddresses']),
/**
* Inquire
*/
@ -135,6 +136,11 @@
}).catch(e => {
this.isLoading = false
})
},
_getWorkerAddressList () {
this.getWorkerAddresses().then(res => {
this.workerAddressList = res.data.map(x => ({ id: x, label: x }))
})
}
},
watch: {
@ -144,7 +150,9 @@
this.searchParams.pageNo = _.isEmpty(a.query) ? 1 : a.query.pageNo
}
},
created () {},
created () {
this._getWorkerAddressList()
},
mounted () {
},
components: { mList, mListConstruction, mConditions, mSpin, mNoData, mCreateWorker }

View File

@ -630,5 +630,14 @@ export default {
reject(e)
})
})
},
getWorkerAddresses ({ state }, payload) {
return new Promise((resolve, reject) => {
io.get('worker-group/worker-address-list', payload, res => {
resolve(res)
}).catch(e => {
reject(e)
})
})
}
}

View File

@ -504,8 +504,7 @@ export default {
'Edit token': 'Edit token',
Addresses: 'Addresses',
'Worker Addresses': 'Worker Addresses',
'Please enter the worker addresses separated by commas': 'Please enter the worker addresses separated by commas',
'Note: Multiple worker addresses have been comma separated': 'Note: Multiple worker addresses have been comma separated',
'Please select the worker addresses': 'Please select the worker addresses',
'Failure time': 'Failure time',
'Expiration time': 'Expiration time',
User: 'User',
@ -571,7 +570,6 @@ export default {
'Please Enter Http Condition': 'Please Enter Http Condition',
'There is no data for this period of time': 'There is no data for this period of time',
'Worker addresses cannot be empty': 'Worker addresses cannot be empty',
'Please enter the correct worker addresses': 'Please enter the correct worker addresses',
'Please generate token': 'Please generate token',
'Spark Version': 'Spark Version',
TargetDataBase: 'target database',

View File

@ -504,8 +504,7 @@ export default {
'Edit token': '编辑令牌',
Addresses: '地址',
'Worker Addresses': 'Worker地址',
'Please enter the worker addresses separated by commas': '请输入Worker地址多个用英文逗号隔开',
'Note: Multiple worker addresses have been comma separated': '注意多个Worker地址以英文逗号分割',
'Please select the worker addresses': '请选择Worker地址',
'Failure time': '失效时间',
'Expiration time': '失效时间',
User: '用户',
@ -571,7 +570,6 @@ export default {
'Please Enter Http Condition': '请填写校验内容',
'There is no data for this period of time': '该时间段无数据',
'Worker addresses cannot be empty': 'Worker地址不能为空',
'Please enter the correct worker addresses': '请输入正确的Worker地址',
'Please generate token': '请生成Token',
'Spark Version': 'Spark版本',
TargetDataBase: '目标库',

View File

@ -934,7 +934,7 @@
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/register/ServerNodeManagerTest.java</include>
<include>**/server/register/ZookeeperRegistryCenterTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>

View File

@ -795,11 +795,12 @@ CREATE TABLE `t_ds_user` (
DROP TABLE IF EXISTS `t_ds_worker_group`;
CREATE TABLE `t_ds_worker_group` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`name` varchar(256) NULL DEFAULT NULL COMMENT 'worker group name',
`name` varchar(256) NOT NULL COMMENT 'worker group name',
`addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]',
`create_time` datetime NULL DEFAULT NULL COMMENT 'create time',
`update_time` datetime NULL DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
PRIMARY KEY (`id`),
UNIQUE KEY `name_unique` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------

View File

@ -658,11 +658,12 @@ create index version_index on t_ds_version(version);
DROP TABLE IF EXISTS t_ds_worker_group;
CREATE TABLE t_ds_worker_group (
id bigint NOT NULL ,
name varchar(256) DEFAULT NULL ,
name varchar(256) NOT NULL ,
addr_list text DEFAULT NULL ,
create_time timestamp DEFAULT NULL ,
update_time timestamp DEFAULT NULL ,
PRIMARY KEY (id)
PRIMARY KEY (id) ,
CONSTRAINT name_unique UNIQUE (name)
) ;
--

View File

@ -28,6 +28,8 @@ BEGIN
AND COLUMN_NAME ='ip_list')
THEN
ALTER TABLE t_ds_worker_group CHANGE COLUMN `ip_list` `addr_list` text;
ALTER TABLE t_ds_worker_group MODIFY COLUMN `name` varchar(256) NOT NULL;
ALTER TABLE t_ds_worker_group ADD UNIQUE KEY `name_unique` (`name`);
END IF;
END;

View File

@ -15,7 +15,6 @@
* limitations under the License.
*/
-- uc_dolphin_T_t_ds_worker_group_A_ip_list
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_worker_group_A_ip_list() RETURNS void AS $$
@ -24,8 +23,10 @@ BEGIN
WHERE TABLE_NAME='t_ds_worker_group'
AND COLUMN_NAME ='ip_list')
THEN
ALTER TABLE t_ds_worker_group rename ip_list TO addr_list;
ALTER TABLE t_ds_worker_group ALTER column addr_list type text;
ALTER TABLE t_ds_worker_group RENAME ip_list TO addr_list;
ALTER TABLE t_ds_worker_group ALTER COLUMN addr_list type text;
ALTER TABLE t_ds_worker_group ALTER COLUMN name type varchar(256), ALTER COLUMN name SET NOT NULL;
ALTER TABLE t_ds_worker_group ADD CONSTRAINT name_unique UNIQUE (name);
END IF;
END;
$$ LANGUAGE plpgsql;
@ -36,5 +37,5 @@ SELECT uc_dolphin_T_t_ds_worker_group_A_ip_list();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_worker_group_A_ip_list();
-- Add foreign key constraints for t_ds_task_instance --
delimiter ;
ALTER TABLE t_ds_task_instance ADD CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE;