Distributed scheduling framework.
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

/**
 * worker group service
 */
@Service
public class WorkerGroupService extends BaseService {

    // marker used to recognize ZooKeeper's NoNodeException from an exception message
    private static final String NO_NODE_EXCEPTION_REGEX = "KeeperException$NoNodeException";

    @Autowired
    protected ZookeeperCachedOperator zookeeperCachedOperator;

    @Autowired
    ProcessInstanceMapper processInstanceMapper;

    /**
     * query worker group paging
     *
     * @param loginUser login user
     * @param pageNo    page number
     * @param searchVal search value
     * @param pageSize  page size
     * @return worker group list page
     */
    public Map<String, Object> queryAllGroupPaging(User loginUser, Integer pageNo, Integer pageSize, String searchVal) {
        // list from index
        Integer fromIndex = (pageNo - 1) * pageSize;
        // list to index
        Integer toIndex = (pageNo - 1) * pageSize + pageSize;

        Map<String, Object> result = new HashMap<>();
        if (isNotAdmin(loginUser, result)) {
            return result;
        }

        List<WorkerGroup> workerGroups = getWorkerGroups(true);
        List<WorkerGroup> searchValDataList = new ArrayList<>();
        List<WorkerGroup> resultDataList = new ArrayList<>();
        if (CollectionUtils.isNotEmpty(workerGroups)) {
            if (StringUtils.isNotEmpty(searchVal)) {
                for (WorkerGroup workerGroup : workerGroups) {
                    if (workerGroup.getName().contains(searchVal)) {
                        searchValDataList.add(workerGroup);
                    }
                }
            } else {
                searchValDataList = workerGroups;
            }

            // clamp the sublist bounds so a last page that is not full, or a page
            // number past the end of the list, cannot raise IndexOutOfBoundsException
            toIndex = Math.min(toIndex, searchValDataList.size());
            if (fromIndex < toIndex) {
                resultDataList = searchValDataList.subList(fromIndex, toIndex);
            }
        }

        PageInfo<WorkerGroup> pageInfo = new PageInfo<>(pageNo, pageSize);
        // report the size of the whole filtered list, not of the current page,
        // so the pager can show the correct number of pages
        pageInfo.setTotalCount(searchValDataList.size());
        pageInfo.setLists(resultDataList);
        result.put(Constants.DATA_LIST, pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }
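
    // Worked example (illustrative numbers): with 15 matching worker groups,
    // pageNo = 2 and pageSize = 10 give fromIndex = 10 and
    // toIndex = min(20, 15) = 15, so the second page holds the last five
    // groups and totalCount is reported as 15.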
    /**
     * query all worker group
     *
     * @return all worker group list
     */
    public Map<String, Object> queryAllGroup() {
        Map<String, Object> result = new HashMap<>();
        List<WorkerGroup> workerGroups = getWorkerGroups(false);
        Set<String> availableWorkerGroupSet = workerGroups.stream()
                .map(WorkerGroup::getName)
                .collect(Collectors.toSet());
        result.put(Constants.DATA_LIST, availableWorkerGroupSet);
        putMsg(result, Status.SUCCESS);
        return result;
    }
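
    // Example (illustrative): with groups "default" and "gpu" registered under
    // the workers path, the DATA_LIST entry of the returned map is the name set
    // {"default", "gpu"}; unlike the paging variant, no host or time details
    // are resolved here.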
    /**
     * get worker groups
     *
     * @param isPaging whether paging
     * @return WorkerGroup list
     */
    private List<WorkerGroup> getWorkerGroups(boolean isPaging) {
        String workerPath = zookeeperCachedOperator.getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_WORKERS;
        List<WorkerGroup> workerGroups = new ArrayList<>();
        List<String> workerGroupList;
        try {
            workerGroupList = zookeeperCachedOperator.getChildrenKeys(workerPath);
        } catch (Exception e) {
            // guard against a null exception message before matching on it
            if (e.getMessage() != null && e.getMessage().contains(NO_NODE_EXCEPTION_REGEX)) {
                if (isPaging) {
                    return workerGroups;
                } else {
                    // ignore NoNodeException and fall back to the default worker group
                    WorkerGroup wg = new WorkerGroup();
                    wg.setName(DEFAULT_WORKER_GROUP);
                    workerGroups.add(wg);
                    return workerGroups;
                }
            } else {
                throw e;
            }
        }

        for (String workerGroup : workerGroupList) {
            String workerGroupPath = workerPath + "/" + workerGroup;
            List<String> childrenNodes = zookeeperCachedOperator.getChildrenKeys(workerGroupPath);
            String timeStamp = "";
            // child node names end with ":<timestamp>"; strip that suffix to keep
            // the worker address, and remember the suffix so the full znode path
            // can be rebuilt when reading the registered value below
            for (int i = 0; i < childrenNodes.size(); i++) {
                String ip = childrenNodes.get(i);
                childrenNodes.set(i, ip.substring(0, ip.lastIndexOf(":")));
                timeStamp = ip.substring(ip.lastIndexOf(":"));
            }

            if (CollectionUtils.isNotEmpty(childrenNodes)) {
                WorkerGroup wg = new WorkerGroup();
                wg.setName(workerGroup);
                if (isPaging) {
                    wg.setIpList(childrenNodes);
                    // the registered value is a comma-separated heartbeat string;
                    // fields 6 and 7 are parsed as create and update times
                    String registeredIpValue = zookeeperCachedOperator.get(workerGroupPath + "/" + childrenNodes.get(0) + timeStamp);
                    wg.setCreateTime(DateUtils.stringToDate(registeredIpValue.split(",")[6]));
                    wg.setUpdateTime(DateUtils.stringToDate(registeredIpValue.split(",")[7]));
                }
                workerGroups.add(wg);
            }
        }
        return workerGroups;
    }
}
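
A minimal sketch of how this service can be unit-tested with the ZooKeeper reads stubbed out, assuming JUnit 4 and Mockito are on the classpath. The root path, group name, and node name below are illustrative values, not taken from the file above; only queryAllGroup() is exercised, which in turn drives getWorkerGroups(false).

package org.apache.dolphinscheduler.api.service;

import java.util.Arrays;
import java.util.Map;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

@RunWith(MockitoJUnitRunner.class)
public class WorkerGroupServiceTest {

    @InjectMocks
    private WorkerGroupService workerGroupService;

    @Mock
    private ZookeeperCachedOperator zookeeperCachedOperator;

    @Before
    public void before() {
        // stub the ZooKeeper root so getWorkerGroups can build the workers path
        ZookeeperConfig zookeeperConfig = new ZookeeperConfig();
        zookeeperConfig.setDsRoot("/dolphinscheduler");
        Mockito.when(zookeeperCachedOperator.getZookeeperConfig()).thenReturn(zookeeperConfig);
    }

    @Test
    public void testQueryAllGroup() {
        String workerPath = "/dolphinscheduler" + Constants.ZOOKEEPER_DOLPHINSCHEDULER_WORKERS;
        // one group "default" containing one node named "<host:port>:<timestamp>"
        Mockito.when(zookeeperCachedOperator.getChildrenKeys(workerPath))
                .thenReturn(Arrays.asList("default"));
        Mockito.when(zookeeperCachedOperator.getChildrenKeys(workerPath + "/default"))
                .thenReturn(Arrays.asList("192.168.1.1:1234:1234567890"));

        Map<String, Object> result = workerGroupService.queryAllGroup();
        // the DATA_LIST entry should be the set of group names, here {"default"}
        Assert.assertNotNull(result.get(Constants.DATA_LIST));
    }
}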