
[Feature][Improvement] Support multi cluster environments - k8s type (#10096)

* service code

* [Feature][UI] Add front-end for cluster manage

* fix e2e

* remove comment on cluster controller

* doc

* img

* setting e2e.yaml

* test

* rerun e2e

* fix bug from comment

* Update index.tsx

Use NSpace instead of CSS.

* Update index.tsx

Remove the style.

* Delete index.module.scss

Remove the useless file.

Co-authored-by: qianl4 <qianl4@cicso.com>
Co-authored-by: William Tong <weitong@cisco.com>
Co-authored-by: Amy0104 <97265214+Amy0104@users.noreply.github.com>
Branch: 3.1.0-release
Committed by qianli2022 via GitHub, commit ff065d8e5b
39 changed files (lines changed in parentheses):

  1. .github/workflows/e2e.yml (2)
  2. docs/docs/en/guide/security.md (12)
  3. docs/docs/zh/guide/security.md (12)
  4. docs/img/new_ui/dev/security/create-cluster.png (BIN)
  5. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ClusterController.java (236)
  6. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ClusterDto.java (129)
  7. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java (18)
  8. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/k8s/K8sClientService.java (24)
  9. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ClusterService.java (99)
  10. dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ClusterServiceImpl.java (335)
  11. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ClusterControllerTest.java (202)
  12. dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ClusterServiceTest.java (290)
  13. dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/ClusterConfUtils.java (48)
  14. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Cluster.java (139)
  15. dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ClusterMapper.java (71)
  16. dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ClusterMapper.xml (55)
  17. dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_h2.sql (23)
  18. dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql (19)
  19. dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_postgresql.sql (20)
  20. dolphinscheduler-dao/src/main/resources/sql/upgrade/3.0.0_schema/mysql/dolphinscheduler_ddl.sql (20)
  21. dolphinscheduler-dao/src/main/resources/sql/upgrade/3.0.0_schema/postgresql/dolphinscheduler_ddl.sql (14)
  22. dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ClusterMapperTest.java (191)
  23. dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/java/org/apache/dolphinscheduler/e2e/cases/ClusterE2ETest.java (123)
  24. dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/java/org/apache/dolphinscheduler/e2e/pages/security/ClusterPage.java (151)
  25. dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/java/org/apache/dolphinscheduler/e2e/pages/security/SecurityPage.java (12)
  26. dolphinscheduler-ui/src/layouts/content/use-dataList.ts (8)
  27. dolphinscheduler-ui/src/locales/en_US/menu.ts (1)
  28. dolphinscheduler-ui/src/locales/en_US/project.ts (32)
  29. dolphinscheduler-ui/src/locales/en_US/security.ts (20)
  30. dolphinscheduler-ui/src/locales/zh_CN/menu.ts (1)
  31. dolphinscheduler-ui/src/locales/zh_CN/project.ts (8)
  32. dolphinscheduler-ui/src/locales/zh_CN/security.ts (20)
  33. dolphinscheduler-ui/src/router/modules/security.ts (11)
  34. dolphinscheduler-ui/src/service/modules/cluster/index.ts (80)
  35. dolphinscheduler-ui/src/service/modules/cluster/types.ts (72)
  36. dolphinscheduler-ui/src/views/security/cluster-manage/components/cluster-modal.tsx (191)
  37. dolphinscheduler-ui/src/views/security/cluster-manage/components/use-modal.ts (118)
  38. dolphinscheduler-ui/src/views/security/cluster-manage/index.tsx (170)
  39. dolphinscheduler-ui/src/views/security/cluster-manage/use-table.ts (236)

.github/workflows/e2e.yml (2)

@@ -98,6 +98,8 @@ jobs:
class: org.apache.dolphinscheduler.e2e.cases.QueueE2ETest
- name: Environment
class: org.apache.dolphinscheduler.e2e.cases.EnvironmentE2ETest
- name: Cluster
class: org.apache.dolphinscheduler.e2e.cases.ClusterE2ETest
- name: Token
class: org.apache.dolphinscheduler.e2e.cases.TokenE2ETest
- name: Workflow

docs/docs/en/guide/security.md (12)

@@ -150,6 +150,18 @@ worker.groups=default,test
![use-environment](../../../img/new_ui/dev/security/use-environment.png)
## Cluster Management
> Add or update cluster
- Each workflow can be related to zero or several clusters to support multiple environments; currently only k8s clusters are supported.
> Use a cluster
- After creation and authorization, k8s namespaces and workflows can be associated with clusters. Each cluster has its own workflows and task instances that run independently.
![create-cluster](../../../img/new_ui/dev/security/create-cluster.png)
## Namespace Management
> Add or update k8s cluster

docs/docs/zh/guide/security.md (12)

@@ -149,6 +149,18 @@ worker.groups=default,test
![use-environment](../../../img/new_ui/dev/security/use-environment.png)
## 集群管理
> 创建/更新 集群
- 每个工作流可以绑定零到若干个集群用来支持多集群,目前先用于k8s。
> 使用集群
- 创建和授权后,k8s命名空间和工作流会增加关联集群的功能。每一个集群会有独立的工作流和任务实例独立运行。
![create-cluster](../../../img/new_ui/dev/security/create-cluster.png)
## 命名空间管理
> 创建/更新 k8s集群

docs/img/new_ui/dev/security/create-cluster.png (new binary file, 627 KiB; image not shown)
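For context, here is a minimal sketch (not part of this change) of how the cluster "config" value described in the documentation above can be assembled: the raw kubeconfig YAML is stored as a JSON string under the "k8s" key, which is what ClusterConfUtils.getK8sConfig() later reads back on the server side. The kubeconfig path and the class name are placeholders.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

import java.nio.file.Files;
import java.nio.file.Paths;

public class ClusterConfigSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder path: read the kubeconfig of the target k8s cluster.
        String kubeConfigYaml = new String(Files.readAllBytes(Paths.get("/path/to/kubeconfig")));

        // Wrap it under the "k8s" key, matching ClusterConfUtils.K8S_CONFIG.
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode config = mapper.createObjectNode();
        config.put("k8s", kubeConfigYaml);

        // This JSON string is what is submitted as the "config" parameter
        // when creating or updating a cluster in the Security Center.
        System.out.println(mapper.writeValueAsString(config));
    }
}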

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ClusterController.java (236)

@@ -0,0 +1,236 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import static org.apache.dolphinscheduler.api.enums.Status.CREATE_CLUSTER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_CLUSTER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_CLUSTER_BY_CODE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_CLUSTER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_CLUSTER_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.VERIFY_CLUSTER_ERROR;
import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.ClusterService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import java.util.Map;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestAttribute;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import springfox.documentation.annotations.ApiIgnore;
/**
* cluster controller
*/
@Api(tags = "CLUSTER_TAG")
@RestController
@RequestMapping("cluster")
public class ClusterController extends BaseController {
@Autowired
private ClusterService clusterService;
/**
* create cluster
*
* @param loginUser login user
* @param name cluster name
* @param config config
* @param description description
* @return create result code, or an error if the cluster name already exists
*/
@ApiOperation(value = "createCluster", notes = "CREATE_CLUSTER_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "name", value = "CLUSTER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "config", value = "CONFIG", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "CLUSTER_DESC", dataType = "String")
})
@PostMapping(value = "/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_CLUSTER_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result createCluster(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("name") String name,
@RequestParam("config") String config,
@RequestParam(value = "description", required = false) String description) {
Map<String, Object> result = clusterService.createCluster(loginUser, name, config, description);
return returnDataList(result);
}
/**
* update cluster
*
* @param loginUser login user
* @param code cluster code
* @param name cluster name
* @param config cluster config
* @param description description
* @return update result code
*/
@ApiOperation(value = "updateCluster", notes = "UPDATE_CLUSTER_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "code", value = "CLUSTER_CODE", required = true, dataType = "Long", example = "100"),
@ApiImplicitParam(name = "name", value = "CLUSTER_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "config", value = "CLUSTER_CONFIG", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "CLUSTER_DESC", dataType = "String"),
})
@PostMapping(value = "/update")
@ResponseStatus(HttpStatus.OK)
@ApiException(UPDATE_CLUSTER_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result updateCluster(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("code") Long code,
@RequestParam("name") String name,
@RequestParam("config") String config,
@RequestParam(value = "description", required = false) String description) {
Map<String, Object> result = clusterService.updateClusterByCode(loginUser, code, name, config, description);
return returnDataList(result);
}
/**
* query cluster details by code
*
* @param clusterCode cluster code
* @return cluster detail information
*/
@ApiOperation(value = "queryClusterByCode", notes = "QUERY_CLUSTER_BY_CODE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "clusterCode", value = "CLUSTER_CODE", required = true, dataType = "Long", example = "100")
})
@GetMapping(value = "/query-by-code")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_CLUSTER_BY_CODE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryClusterByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("clusterCode") Long clusterCode) {
Map<String, Object> result = clusterService.queryClusterByCode(clusterCode);
return returnDataList(result);
}
/**
* query cluster list paging
*
* @param searchVal search value
* @param pageSize page size
* @param pageNo page number
* @return cluster list which the login user have permission to see
*/
@ApiOperation(value = "queryClusterListPaging", notes = "QUERY_CLUSTER_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "20"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1")
})
@GetMapping(value = "/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_CLUSTER_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryClusterListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize,
@RequestParam("pageNo") Integer pageNo
) {
Result result = checkPageParams(pageNo, pageSize);
if (!result.checkResult()) {
return result;
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = clusterService.queryClusterListPaging(pageNo, pageSize, searchVal);
return result;
}
/**
* delete cluster by code
*
* @param loginUser login user
* @param clusterCode cluster code
* @return delete result code
*/
@ApiOperation(value = "deleteClusterByCode", notes = "DELETE_CLUSTER_BY_CODE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "clusterCode", value = "CLUSTER_CODE", required = true, dataType = "Long", example = "100")
})
@PostMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_CLUSTER_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result deleteCluster(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("clusterCode") Long clusterCode
) {
Map<String, Object> result = clusterService.deleteClusterByCode(loginUser, clusterCode);
return returnDataList(result);
}
/**
* query all cluster list
*
* @param loginUser login user
* @return all cluster list
*/
@ApiOperation(value = "queryAllClusterList", notes = "QUERY_ALL_CLUSTER_LIST_NOTES")
@GetMapping(value = "/query-cluster-list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_CLUSTER_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryAllClusterList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) {
Map<String, Object> result = clusterService.queryAllClusterList();
return returnDataList(result);
}
/**
* verify cluster and cluster name
*
* @param loginUser login user
* @param clusterName cluster name
* @return true if the cluster name does not exist, otherwise false
*/
@ApiOperation(value = "verifyCluster", notes = "VERIFY_CLUSTER_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "clusterName", value = "CLUSTER_NAME", required = true, dataType = "String")
})
@PostMapping(value = "/verify-cluster")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_CLUSTER_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result verifyCluster(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "clusterName") String clusterName
) {
Map<String, Object> result = clusterService.verifyCluster(clusterName);
return returnDataList(result);
}
}
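As a quick illustration of how these endpoints fit together, here is a minimal client-side sketch using Spring's RestTemplate. The base URL, the session token value, the class name, and the parameter values are assumptions for illustration only; the "sessionId" header name follows the controller tests further below.

import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.client.RestTemplate;

public class ClusterApiClientSketch {

    public static void main(String[] args) {
        String base = "http://localhost:12345/dolphinscheduler"; // assumed API base URL
        RestTemplate restTemplate = new RestTemplate();

        HttpHeaders headers = new HttpHeaders();
        headers.add("sessionId", "<your-session-id>"); // placeholder session token

        // POST /cluster/create: returns the generated cluster code in the data field.
        MultiValueMap<String, String> params = new LinkedMultiValueMap<>();
        params.add("name", "k8s-cluster-1");
        params.add("config", "{\"k8s\":\"apiVersion: v1\\n...\"}"); // kubeconfig wrapped in JSON
        params.add("description", "demo cluster");
        String created = restTemplate.postForObject(
                base + "/cluster/create", new HttpEntity<>(params, headers), String.class);
        System.out.println(created);

        // GET /cluster/query-cluster-list: lists all clusters.
        String all = restTemplate.exchange(
                base + "/cluster/query-cluster-list", HttpMethod.GET,
                new HttpEntity<>(headers), String.class).getBody();
        System.out.println(all);
    }
}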

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/ClusterDto.java (129)

@@ -0,0 +1,129 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto;
import java.util.Date;
import java.util.List;
/**
* ClusterDto
*/
public class ClusterDto {
private int id;
/**
* cluster code
*/
private Long code;
/**
* cluster name
*/
private String name;
/**
* config content
*/
private String config;
private String description;
private List<String> processDefinitions;
/**
* operator user id
*/
private Integer operator;
private Date createTime;
private Date updateTime;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Long getCode() {
return this.code;
}
public void setCode(Long code) {
this.code = code;
}
public String getConfig() {
return this.config;
}
public void setConfig(String config) {
this.config = config;
}
public String getDescription() {
return this.description;
}
public void setDescription(String description) {
this.description = description;
}
public Integer getOperator() {
return this.operator;
}
public void setOperator(Integer operator) {
this.operator = operator;
}
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public Date getUpdateTime() {
return updateTime;
}
public void setUpdateTime(Date updateTime) {
this.updateTime = updateTime;
}
public List<String> getProcessDefinitions() {
return processDefinitions;
}
public void setProcessDefinitions(List<String> processDefinitions) {
this.processDefinitions = processDefinitions;
}
}

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java (18)

@@ -364,6 +364,24 @@ public enum Status {
GET_DATASOURCE_OPTIONS_ERROR(1200017, "get datasource options error", "获取数据源Options错误"),
GET_DATASOURCE_TABLES_ERROR(1200018, "get datasource tables error", "获取数据源表列表错误"),
GET_DATASOURCE_TABLE_COLUMNS_ERROR(1200019, "get datasource table columns error", "获取数据源表列名错误"),
CREATE_CLUSTER_ERROR(120020, "create cluster error", "创建集群失败"),
CLUSTER_NAME_EXISTS(120021, "this cluster name [{0}] already exists", "集群名称[{0}]已经存在"),
CLUSTER_NAME_IS_NULL(120022, "this cluster name shouldn't be empty.", "集群名称不能为空"),
CLUSTER_CONFIG_IS_NULL(120023, "this cluster config shouldn't be empty.", "集群配置信息不能为空"),
UPDATE_CLUSTER_ERROR(120024, "update cluster [{0}] info error", "更新集群[{0}]信息失败"),
DELETE_CLUSTER_ERROR(120025, "delete cluster error", "删除集群信息失败"),
DELETE_CLUSTER_RELATED_TASK_EXISTS(120026, "this cluster has been used in tasks,so you can't delete it.", "该集群已经被任务使用,所以不能删除该集群信息"),
QUERY_CLUSTER_BY_NAME_ERROR(1200027, "not found cluster [{0}] ", "查询集群名称[{0}]信息不存在"),
QUERY_CLUSTER_BY_CODE_ERROR(1200028, "not found cluster [{0}] ", "查询集群编码[{0}]不存在"),
QUERY_CLUSTER_ERROR(1200029, "login user query cluster error", "分页查询集群列表错误"),
VERIFY_CLUSTER_ERROR(1200030, "verify cluster error", "验证集群信息错误"),
CLUSTER_PROCESS_DEFINITIONS_IS_INVALID(1200031, "cluster worker groups is invalid format", "集群关联的工作组参数解析错误"),
UPDATE_CLUSTER_PROCESS_DEFINITION_RELATION_ERROR(1200032, "You can't modify the process definition, because the process definition [{0}] and this cluster [{1}] already be used in the task [{2}]",
"您不能修改集群选项,因为该工作流组 [{0}] 和 该集群 [{1}] 已经被用在任务 [{2}] 中"),
CLUSTER_NOT_EXISTS(120033, "this cluster can not be found in db.", "集群配置数据库里查询不到为空"),
DELETE_CLUSTER_RELATED_NAMESPACE_EXISTS(120034, "this cluster has been used in namespace,so you can't delete it.", "该集群已经被命名空间使用,所以不能删除该集群信息"),
TASK_GROUP_NAME_EXSIT(130001, "this task group name is repeated in a project", "该任务组名称在一个项目中已经使用"),
TASK_GROUP_SIZE_ERROR(130002, "task group size error", "任务组大小应该为大于1的整数"),
TASK_GROUP_STATUS_ERROR(130003, "task group status error", "任务组已经被关闭"),

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/k8s/K8sClientService.java (24)

@@ -66,35 +66,35 @@ public class K8sClientService {
(This hunk only re-indents continuation lines: the removed and re-added lines are identical apart from leading whitespace, so the affected code is shown once.)

// create resource
ResourceQuota queryExist = client.resourceQuotas()
.inNamespace(k8sNamespace.getNamespace())
.withName(k8sNamespace.getNamespace())
.get();
ResourceQuota body = yaml.loadAs(yamlStr, ResourceQuota.class);
if (queryExist != null) {
if (k8sNamespace.getLimitsCpu() == null && k8sNamespace.getLimitsMemory() == null) {
client.resourceQuotas().inNamespace(k8sNamespace.getNamespace())
.withName(k8sNamespace.getNamespace())
.delete();
return null;
}
}
return client.resourceQuotas().inNamespace(k8sNamespace.getNamespace())
.withName(k8sNamespace.getNamespace())
.createOrReplace(body);
}
private Optional<Namespace> getNamespaceFromK8s(String name, String k8s) {
NamespaceList listNamespace =
k8sManager.getK8sClient(k8s).namespaces().list();
Optional<Namespace> list =
listNamespace.getItems().stream()
.filter((Namespace namespace) ->
namespace.getMetadata().getName().equals(name))
.findFirst();
return list;
}

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ClusterService.java (99)

@@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.dao.entity.User;
import java.util.Map;
/**
* cluster service
*/
public interface ClusterService {
/**
* create cluster
*
* @param loginUser login user
* @param name cluster name
* @param config cluster config
* @param desc cluster desc
*/
Map<String, Object> createCluster(User loginUser, String name, String config, String desc);
/**
* query cluster
*
* @param name cluster name
*/
Map<String, Object> queryClusterByName(String name);
/**
* query cluster
*
* @param code cluster code
*/
Map<String, Object> queryClusterByCode(Long code);
/**
* delete cluster
*
* @param loginUser login user
* @param code cluster code
*/
Map<String, Object> deleteClusterByCode(User loginUser, Long code);
/**
* update cluster
*
* @param loginUser login user
* @param code cluster code
* @param name cluster name
* @param config cluster config
* @param desc cluster desc
*/
Map<String, Object> updateClusterByCode(User loginUser, Long code, String name, String config, String desc);
/**
* query cluster paging
*
* @param pageNo page number
* @param searchVal search value
* @param pageSize page size
* @return cluster list page
*/
Result queryClusterListPaging(Integer pageNo, Integer pageSize, String searchVal);
/**
* query all cluster
*
* @return all cluster list
*/
Map<String, Object> queryAllClusterList();
/**
* verify cluster name
*
* @param clusterName cluster name
* @return true if the cluster name does not exist, otherwise false
*/
Map<String, Object> verifyCluster(String clusterName);
}

dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ClusterServiceImpl.java (335)

@@ -0,0 +1,335 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import org.apache.dolphinscheduler.api.dto.ClusterDto;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ClusterService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils;
import org.apache.dolphinscheduler.common.utils.CodeGenerateUtils.CodeGenerateException;
import org.apache.dolphinscheduler.dao.entity.Cluster;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ClusterMapper;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
/**
* cluster definition service impl
*/
@Service
public class ClusterServiceImpl extends BaseServiceImpl implements ClusterService {
private static final Logger logger = LoggerFactory.getLogger(ClusterServiceImpl.class);
@Autowired
private ClusterMapper clusterMapper;
/**
* create cluster
*
* @param loginUser login user
* @param name cluster name
* @param config cluster config
* @param desc cluster desc
*/
@Transactional(rollbackFor = RuntimeException.class)
@Override
public Map<String, Object> createCluster(User loginUser, String name, String config, String desc) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
Map<String, Object> checkResult = checkParams(name, config);
if (checkResult.get(Constants.STATUS) != Status.SUCCESS) {
return checkResult;
}
Cluster clusterExistByName = clusterMapper.queryByClusterName(name);
if (clusterExistByName != null) {
putMsg(result, Status.CLUSTER_NAME_EXISTS, name);
return result;
}
Cluster cluster = new Cluster();
cluster.setName(name);
cluster.setConfig(config);
cluster.setDescription(desc);
cluster.setOperator(loginUser.getId());
cluster.setCreateTime(new Date());
cluster.setUpdateTime(new Date());
long code = 0L;
try {
code = CodeGenerateUtils.getInstance().genCode();
cluster.setCode(code);
} catch (CodeGenerateException e) {
logger.error("Cluster code get error, ", e);
}
if (code == 0L) {
putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating cluster code");
return result;
}
if (clusterMapper.insert(cluster) > 0) {
result.put(Constants.DATA_LIST, cluster.getCode());
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.CREATE_CLUSTER_ERROR);
}
return result;
}
/**
* query cluster paging
*
* @param pageNo page number
* @param searchVal search value
* @param pageSize page size
* @return cluster list page
*/
@Override
public Result queryClusterListPaging(Integer pageNo, Integer pageSize, String searchVal) {
Result result = new Result();
Page<Cluster> page = new Page<>(pageNo, pageSize);
IPage<Cluster> clusterIPage = clusterMapper.queryClusterListPaging(page, searchVal);
PageInfo<ClusterDto> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotal((int) clusterIPage.getTotal());
if (CollectionUtils.isNotEmpty(clusterIPage.getRecords())) {
List<ClusterDto> dtoList = clusterIPage.getRecords().stream().map(cluster -> {
ClusterDto dto = new ClusterDto();
BeanUtils.copyProperties(cluster, dto);
return dto;
}).collect(Collectors.toList());
pageInfo.setTotalList(dtoList);
} else {
pageInfo.setTotalList(new ArrayList<>());
}
result.setData(pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query all cluster
*
* @return all cluster list
*/
@Override
public Map<String, Object> queryAllClusterList() {
Map<String, Object> result = new HashMap<>();
List<Cluster> clusterList = clusterMapper.queryAllClusterList();
if (CollectionUtils.isNotEmpty(clusterList)) {
List<ClusterDto> dtoList = clusterList.stream().map(cluster -> {
ClusterDto dto = new ClusterDto();
BeanUtils.copyProperties(cluster, dto);
return dto;
}).collect(Collectors.toList());
result.put(Constants.DATA_LIST, dtoList);
} else {
result.put(Constants.DATA_LIST, new ArrayList<>());
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query cluster
*
* @param code cluster code
*/
@Override
public Map<String, Object> queryClusterByCode(Long code) {
Map<String, Object> result = new HashMap<>();
Cluster cluster = clusterMapper.queryByClusterCode(code);
if (cluster == null) {
putMsg(result, Status.QUERY_CLUSTER_BY_CODE_ERROR, code);
} else {
ClusterDto dto = new ClusterDto();
BeanUtils.copyProperties(cluster, dto);
result.put(Constants.DATA_LIST, dto);
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* query cluster
*
* @param name cluster name
*/
@Override
public Map<String, Object> queryClusterByName(String name) {
Map<String, Object> result = new HashMap<>();
Cluster cluster = clusterMapper.queryByClusterName(name);
if (cluster == null) {
putMsg(result, Status.QUERY_CLUSTER_BY_NAME_ERROR, name);
} else {
ClusterDto dto = new ClusterDto();
BeanUtils.copyProperties(cluster, dto);
result.put(Constants.DATA_LIST, dto);
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* delete cluster
*
* @param loginUser login user
* @param code cluster code
*/
@Transactional(rollbackFor = RuntimeException.class)
@Override
public Map<String, Object> deleteClusterByCode(User loginUser, Long code) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
int delete = clusterMapper.deleteByCode(code);
if (delete > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_CLUSTER_ERROR);
}
return result;
}
/**
* update cluster
*
* @param loginUser login user
* @param code cluster code
* @param name cluster name
* @param config cluster config
* @param desc cluster desc
*/
@Transactional(rollbackFor = RuntimeException.class)
@Override
public Map<String, Object> updateClusterByCode(User loginUser, Long code, String name, String config, String desc) {
Map<String, Object> result = new HashMap<>();
if (isNotAdmin(loginUser, result)) {
return result;
}
Map<String, Object> checkResult = checkParams(name, config);
if (checkResult.get(Constants.STATUS) != Status.SUCCESS) {
return checkResult;
}
Cluster clusterExistByName = clusterMapper.queryByClusterName(name);
if (clusterExistByName != null && !clusterExistByName.getCode().equals(code)) {
putMsg(result, Status.CLUSTER_NAME_EXISTS, name);
return result;
}
Cluster clusterExist = clusterMapper.queryByClusterCode(code);
if (clusterExist == null) {
putMsg(result, Status.CLUSTER_NOT_EXISTS, name);
return result;
}
//update cluster
clusterExist.setConfig(config);
clusterExist.setName(name);
clusterExist.setDescription(desc);
clusterMapper.updateById(clusterExist);
//need not update relation
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify cluster name
*
* @param clusterName cluster name
* @return true if the cluster name does not exist, otherwise false
*/
@Override
public Map<String, Object> verifyCluster(String clusterName) {
Map<String, Object> result = new HashMap<>();
if (StringUtils.isEmpty(clusterName)) {
putMsg(result, Status.CLUSTER_NAME_IS_NULL);
return result;
}
Cluster cluster = clusterMapper.queryByClusterName(clusterName);
if (cluster != null) {
putMsg(result, Status.CLUSTER_NAME_EXISTS, clusterName);
return result;
}
result.put(Constants.STATUS, Status.SUCCESS);
return result;
}
public Map<String, Object> checkParams(String name, String config) {
Map<String, Object> result = new HashMap<>();
if (StringUtils.isEmpty(name)) {
putMsg(result, Status.CLUSTER_NAME_IS_NULL);
return result;
}
if (StringUtils.isEmpty(config)) {
putMsg(result, Status.CLUSTER_CONFIG_IS_NULL);
return result;
}
result.put(Constants.STATUS, Status.SUCCESS);
return result;
}
}

dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ClusterControllerTest.java (202)

@@ -0,0 +1,202 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.base.Preconditions;
public class ClusterControllerTest extends AbstractControllerTest {
public static final String clusterName = "Cluster1";
public static final String config = "this is config content";
public static final String desc = "this is cluster description";
private static final Logger logger = LoggerFactory.getLogger(ClusterControllerTest.class);
private String clusterCode;
@Before
public void before() throws Exception {
testCreateCluster();
}
@Override
@After
public void after() throws Exception {
testDeleteCluster();
}
public void testCreateCluster() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("name", clusterName);
paramsMap.add("config", config);
paramsMap.add("description", desc);
MvcResult mvcResult = mockMvc.perform(post("/cluster/create")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), new TypeReference<Result<String>>() {
});
logger.info(result.toString());
Assert.assertTrue(result != null && result.isSuccess());
Assert.assertNotNull(result.getData());
logger.info("create cluster return result:{}", mvcResult.getResponse().getContentAsString());
clusterCode = (String) result.getData();
}
@Test
public void testUpdateCluster() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("code", clusterCode);
paramsMap.add("name", "cluster_test_update");
paramsMap.add("config", "{\"k8s\":\"apiVersion: v1\"}");
paramsMap.add("desc", "the test cluster update");
MvcResult mvcResult = mockMvc.perform(post("/cluster/update")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
logger.info(result.toString());
Assert.assertTrue(result != null && result.isSuccess());
logger.info("update cluster return result:{}", mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryClusterByCode() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("clusterCode", clusterCode);
MvcResult mvcResult = mockMvc.perform(get("/cluster/query-by-code")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
logger.info(result.toString());
Assert.assertTrue(result != null && result.isSuccess());
logger.info(mvcResult.getResponse().getContentAsString());
logger.info("query cluster by id :{}, return result:{}", clusterCode, mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryClusterListPaging() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("searchVal", "test");
paramsMap.add("pageSize", "2");
paramsMap.add("pageNo", "2");
MvcResult mvcResult = mockMvc.perform(get("/cluster/list-paging")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
logger.info(result.toString());
Assert.assertTrue(result != null && result.isSuccess());
logger.info("query list-paging cluster return result:{}", mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryAllClusterList() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
MvcResult mvcResult = mockMvc.perform(get("/cluster/query-cluster-list")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
logger.info(result.toString());
Assert.assertTrue(result != null && result.isSuccess());
logger.info("query all cluster return result:{}", mvcResult.getResponse().getContentAsString());
}
@Test
public void testVerifyCluster() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("clusterName", clusterName);
MvcResult mvcResult = mockMvc.perform(post("/cluster/verify-cluster")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
logger.info(result.toString());
Assert.assertTrue(result.isStatus(Status.CLUSTER_NAME_EXISTS));
logger.info("verify cluster return result:{}", mvcResult.getResponse().getContentAsString());
}
private void testDeleteCluster() throws Exception {
Preconditions.checkNotNull(clusterCode);
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("clusterCode", clusterCode);
MvcResult mvcResult = mockMvc.perform(post("/cluster/delete")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
logger.info(result.toString());
Assert.assertTrue(result != null && result.isSuccess());
logger.info("delete cluster return result:{}", mvcResult.getResponse().getContentAsString());
}
}

dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ClusterServiceTest.java (290)

@@ -0,0 +1,290 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.k8s.K8sManager;
import org.apache.dolphinscheduler.api.service.impl.ClusterServiceImpl;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.dao.entity.Cluster;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ClusterMapper;
import org.apache.dolphinscheduler.dao.mapper.K8sNamespaceMapper;
import org.apache.dolphinscheduler.remote.exceptions.RemotingException;
import org.apache.commons.collections.CollectionUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.assertj.core.util.Lists;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.configurationprocessor.json.JSONException;
import org.springframework.boot.configurationprocessor.json.JSONObject;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
/**
* cluster service test
*/
@RunWith(MockitoJUnitRunner.class)
public class ClusterServiceTest {
public static final Logger logger = LoggerFactory.getLogger(ClusterServiceTest.class);
@InjectMocks
private ClusterServiceImpl clusterService;
@Mock
private ClusterMapper clusterMapper;
@Mock
private K8sNamespaceMapper k8sNamespaceMapper;
@Mock
private K8sManager k8sManager;
public static final String testUserName = "clusterServerTest";
public static final String clusterName = "Env1";
@Before
public void setUp(){
}
@After
public void after(){
}
@Test
public void testCreateCluster() {
User loginUser = getGeneralUser();
Map<String, Object> result = clusterService.createCluster(loginUser,clusterName,getConfig(),getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
loginUser = getAdminUser();
result = clusterService.createCluster(loginUser,clusterName,"",getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_CONFIG_IS_NULL, result.get(Constants.STATUS));
result = clusterService.createCluster(loginUser,"",getConfig(),getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_NAME_IS_NULL, result.get(Constants.STATUS));
Mockito.when(clusterMapper.queryByClusterName(clusterName)).thenReturn(getCluster());
result = clusterService.createCluster(loginUser,clusterName,getConfig(),getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_NAME_EXISTS, result.get(Constants.STATUS));
Mockito.when(clusterMapper.insert(Mockito.any(Cluster.class))).thenReturn(1);
result = clusterService.createCluster(loginUser,"testName","testConfig","testDesc");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testCheckParams() {
Map<String, Object> result = clusterService.checkParams(clusterName,getConfig());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
result = clusterService.checkParams("",getConfig());
Assert.assertEquals(Status.CLUSTER_NAME_IS_NULL, result.get(Constants.STATUS));
result = clusterService.checkParams(clusterName,"");
Assert.assertEquals(Status.CLUSTER_CONFIG_IS_NULL, result.get(Constants.STATUS));
}
@Test
public void testUpdateClusterByCode() throws RemotingException {
User loginUser = getGeneralUser();
Map<String, Object> result = clusterService.updateClusterByCode(loginUser,1L,clusterName,getConfig(),getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
loginUser = getAdminUser();
result = clusterService.updateClusterByCode(loginUser,1L,clusterName,"",getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_CONFIG_IS_NULL, result.get(Constants.STATUS));
result = clusterService.updateClusterByCode(loginUser,1L,"",getConfig(),getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_NAME_IS_NULL, result.get(Constants.STATUS));
result = clusterService.updateClusterByCode(loginUser,2L,clusterName,getConfig(),getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_NOT_EXISTS, result.get(Constants.STATUS));
Mockito.when(clusterMapper.queryByClusterName(clusterName)).thenReturn(getCluster());
result = clusterService.updateClusterByCode(loginUser,2L,clusterName,getConfig(),getDesc());
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_NAME_EXISTS, result.get(Constants.STATUS));
Mockito.when(clusterMapper.updateById(Mockito.any(Cluster.class))).thenReturn(1);
Mockito.when(clusterMapper.queryByClusterCode(1L)).thenReturn(getCluster());
result = clusterService.updateClusterByCode(loginUser,1L,"testName",getConfig(),"test");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testQueryAllClusterList() {
Mockito.when(clusterMapper.queryAllClusterList()).thenReturn(Lists.newArrayList(getCluster()));
Map<String, Object> result = clusterService.queryAllClusterList();
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<Cluster> list = (List<Cluster>)(result.get(Constants.DATA_LIST));
Assert.assertEquals(1,list.size());
}
@Test
public void testQueryClusterListPaging() {
IPage<Cluster> page = new Page<>(1, 10);
page.setRecords(getList());
page.setTotal(1L);
Mockito.when(clusterMapper.queryClusterListPaging(Mockito.any(Page.class), Mockito.eq(clusterName))).thenReturn(page);
Result result = clusterService.queryClusterListPaging(1, 10, clusterName);
logger.info(result.toString());
PageInfo<Cluster> pageInfo = (PageInfo<Cluster>) result.getData();
Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getTotalList()));
}
@Test
public void testQueryClusterByName() {
Mockito.when(clusterMapper.queryByClusterName(clusterName)).thenReturn(null);
Map<String, Object> result = clusterService.queryClusterByName(clusterName);
logger.info(result.toString());
Assert.assertEquals(Status.QUERY_CLUSTER_BY_NAME_ERROR,result.get(Constants.STATUS));
Mockito.when(clusterMapper.queryByClusterName(clusterName)).thenReturn(getCluster());
result = clusterService.queryClusterByName(clusterName);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
}
@Test
public void testQueryClusterByCode() {
Mockito.when(clusterMapper.queryByClusterCode(1L)).thenReturn(null);
Map<String, Object> result = clusterService.queryClusterByCode(1L);
logger.info(result.toString());
Assert.assertEquals(Status.QUERY_CLUSTER_BY_CODE_ERROR,result.get(Constants.STATUS));
Mockito.when(clusterMapper.queryByClusterCode(1L)).thenReturn(getCluster());
result = clusterService.queryClusterByCode(1L);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
}
@Test
public void testDeleteClusterByCode() {
User loginUser = getGeneralUser();
Map<String, Object> result = clusterService.deleteClusterByCode(loginUser,1L);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS));
loginUser = getAdminUser();
Mockito.when(clusterMapper.deleteByCode(1L)).thenReturn(1);
result = clusterService.deleteClusterByCode(loginUser,1L);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
}
@Test
public void testVerifyCluster() {
Map<String, Object> result = clusterService.verifyCluster("");
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_NAME_IS_NULL, result.get(Constants.STATUS));
Mockito.when(clusterMapper.queryByClusterName(clusterName)).thenReturn(getCluster());
result = clusterService.verifyCluster(clusterName);
logger.info(result.toString());
Assert.assertEquals(Status.CLUSTER_NAME_EXISTS, result.get(Constants.STATUS));
}
private Cluster getCluster() {
Cluster cluster = new Cluster();
cluster.setId(1);
cluster.setCode(1L);
cluster.setName(clusterName);
cluster.setConfig(getConfig());
cluster.setDescription(getDesc());
cluster.setOperator(1);
return cluster;
}
/**
* create a cluster description
*/
private String getDesc() {
return "create a cluster to test";
}
/**
* create a cluster config
*/
private String getConfig() {
return "{\"k8s\":\"apiVersion: v1\\nclusters:\\n- cluster:\\n certificate-authority-data: LS0tLS1CRUdJTiBDRJUSUZJQ0FURS0tLS0tCg==\\n server: https:\\/\\/127.0.0.1:6443\\n name: kubernetes\\ncontexts:\\n- context:\\n cluster: kubernetes\\n user: kubernetes-admin\\n name: kubernetes-admin@kubernetes\\ncurrent-context: kubernetes-admin@kubernetes\\nkind: Config\\npreferences: {}\\nusers:\\n- name: kubernetes-admin\\n user:\\n client-certificate-data: LS0tLS1CRUdJTiBDRVJJ0cEhYYnBLRVktLS0tLQo=\"}\n";
}
/**
* create general user
*/
private User getGeneralUser() {
User loginUser = new User();
loginUser.setUserType(UserType.GENERAL_USER);
loginUser.setUserName(testUserName);
loginUser.setId(1);
return loginUser;
}
/**
* create admin user
*/
private User getAdminUser() {
User loginUser = new User();
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setUserName(testUserName);
loginUser.setId(1);
return loginUser;
}
private List<Cluster> getList() {
List<Cluster> list = new ArrayList<>();
list.add(getCluster());
return list;
}
}

dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/ClusterConfUtils.java (48)

@@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.spi.utils.StringUtils;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* cluster config may contain every environment type, but only the k8s config is supported for now
*/
public class ClusterConfUtils {
private static final String K8S_CONFIG = "k8s";
/**
* get k8s config
*
* @param config cluster config stored in db
* @return the k8s config content, or null if it is not configured
*/
public static String getK8sConfig(String config) {
if (StringUtils.isEmpty(config)) {
return null;
}
ObjectNode conf = JSONUtils.parseObject(config);
if (conf == null) {
return null;
}
// guard against a config object that has no "k8s" entry
if (conf.get(K8S_CONFIG) == null) {
return null;
}
return conf.get(K8S_CONFIG).asText();
}
}
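A small usage sketch, assuming the stored cluster config looks like the value used in ClusterServiceTest, i.e. a JSON object with the kubeconfig text under the "k8s" key; the class name and the shortened config string are illustrative only.

import org.apache.dolphinscheduler.common.utils.ClusterConfUtils;

public class ClusterConfUtilsSketch {

    public static void main(String[] args) {
        // Example config as persisted in t_ds_cluster.config (shortened kubeconfig).
        String storedConfig = "{\"k8s\":\"apiVersion: v1\\nkind: Config\\n...\"}";

        // Returns the raw kubeconfig text, or null when the config is empty,
        // is not valid JSON, or has no "k8s" entry.
        String kubeConfig = ClusterConfUtils.getK8sConfig(storedConfig);
        System.out.println(kubeConfig);
    }
}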

dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Cluster.java (139)

@@ -0,0 +1,139 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.entity;
import java.util.Date;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
/**
* Cluster
*/
@TableName("t_ds_cluster")
public class Cluster {
@TableId(value = "id", type = IdType.AUTO)
private int id;
/**
* cluster code
*/
private Long code;
/**
* cluster name
*/
private String name;
/**
* config content
*/
private String config;
private String description;
/**
* operator user id
*/
private Integer operator;
private Date createTime;
private Date updateTime;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Long getCode() {
return this.code;
}
public void setCode(Long code) {
this.code = code;
}
public String getConfig() {
return this.config;
}
public void setConfig(String config) {
this.config = config;
}
public String getDescription() {
return this.description;
}
public void setDescription(String description) {
this.description = description;
}
public Integer getOperator() {
return this.operator;
}
public void setOperator(Integer operator) {
this.operator = operator;
}
public Date getCreateTime() {
return createTime;
}
public void setCreateTime(Date createTime) {
this.createTime = createTime;
}
public Date getUpdateTime() {
return updateTime;
}
public void setUpdateTime(Date updateTime) {
this.updateTime = updateTime;
}
@Override
public String toString() {
return "Cluster{"
+ "id= " + id
+ ", code= " + code
+ ", name= " + name
+ ", config= " + config
+ ", description= " + description
+ ", operator= " + operator
+ ", createTime= " + createTime
+ ", updateTime= " + updateTime
+ "}";
}
}

dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ClusterMapper.java (71)

@@ -0,0 +1,71 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.dao.entity.Cluster;
import org.apache.ibatis.annotations.Param;
import java.util.List;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
/**
* cluster mapper interface
*/
public interface ClusterMapper extends BaseMapper<Cluster> {
/**
* query cluster by name
*
* @param name name
* @return cluster
*/
Cluster queryByClusterName(@Param("clusterName") String name);
/**
* query cluster by code
*
* @param clusterCode clusterCode
* @return cluster
*/
Cluster queryByClusterCode(@Param("clusterCode") Long clusterCode);
/**
* query all cluster list
* @return cluster list
*/
List<Cluster> queryAllClusterList();
/**
* cluster page
* @param page page
* @param searchName searchName
* @return cluster IPage
*/
IPage<Cluster> queryClusterListPaging(IPage<Cluster> page, @Param("searchName") String searchName);
/**
* delete cluster by code
*
* @param code code
* @return int
*/
int deleteByCode(@Param("code") Long code);
}
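A minimal usage sketch of the ClusterMapper above, assuming a Spring/MyBatis-Plus context where the mapper is registered as a bean; the ClusterConfigReader helper and the Jackson-based parsing of the {"k8s": ..., "yarn": ...} config JSON are illustrative assumptions, not part of this patch.

import org.apache.dolphinscheduler.dao.entity.Cluster;
import org.apache.dolphinscheduler.dao.mapper.ClusterMapper;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Illustrative helper: looks up a cluster by code and extracts its kubernetes config.
@Component
public class ClusterConfigReader {

    @Autowired
    private ClusterMapper clusterMapper;

    private final ObjectMapper objectMapper = new ObjectMapper();

    // Returns the k8s section of the cluster config, or an empty string when the
    // cluster does not exist or has no k8s entry.
    public String readK8sConfig(Long clusterCode) throws Exception {
        Cluster cluster = clusterMapper.queryByClusterCode(clusterCode);
        if (cluster == null || cluster.getConfig() == null) {
            return "";
        }
        JsonNode config = objectMapper.readTree(cluster.getConfig());
        return config.path("k8s").asText("");
    }
}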

55
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ClusterMapper.xml

@ -0,0 +1,55 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ClusterMapper">
<sql id="baseSql">
id, code, name, config, description, operator, create_time, update_time
</sql>
<select id="queryByClusterName" resultType="org.apache.dolphinscheduler.dao.entity.Cluster">
select
<include refid="baseSql"/>
from t_ds_cluster
WHERE name = #{clusterName}
</select>
<select id="queryAllClusterList" resultType="org.apache.dolphinscheduler.dao.entity.Cluster">
select
<include refid="baseSql"/>
from t_ds_cluster
order by create_time desc
</select>
<select id="queryClusterListPaging" resultType="org.apache.dolphinscheduler.dao.entity.Cluster">
select
<include refid="baseSql"/>
from t_ds_cluster
where 1=1
<if test="searchName!=null and searchName != ''">
and name like concat('%', #{searchName}, '%')
</if>
order by create_time desc
</select>
<select id="queryByClusterCode" resultType="org.apache.dolphinscheduler.dao.entity.Cluster">
select
<include refid="baseSql"/>
from t_ds_cluster
where code = #{clusterCode}
</select>
<delete id="deleteByCode">
delete from t_ds_cluster where code = #{code}
</delete>
</mapper>

23
dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_h2.sql

@ -1963,3 +1963,26 @@ CREATE TABLE t_ds_alert_send_status
UNIQUE KEY alert_send_status_unique (alert_id,alert_plugin_instance_id)
);
--
-- Table structure for table t_ds_cluster
--
DROP TABLE IF EXISTS t_ds_cluster CASCADE;
CREATE TABLE t_ds_cluster
(
id int NOT NULL AUTO_INCREMENT,
code bigint(20) NOT NULL,
name varchar(100) DEFAULT NULL,
config text DEFAULT NULL,
description text,
operator int DEFAULT NULL,
create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (id),
UNIQUE KEY cluster_name_unique (name),
UNIQUE KEY cluster_code_unique (code)
);
INSERT INTO `t_ds_cluster`
(`id`,`code`,`name`,`config`,`description`,`operator`,`create_time`,`update_time`)
VALUES (100, 100, 'ds_null_k8s', '{"k8s":"ds_null_k8s"}', 'test', 1, '2021-03-03 11:31:24.0', '2021-03-03 11:31:24.0');

19
dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql

@ -1938,3 +1938,22 @@ CREATE TABLE t_ds_alert_send_status (
PRIMARY KEY (`id`),
UNIQUE KEY `alert_send_status_unique` (`alert_id`,`alert_plugin_instance_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Table structure for t_ds_cluster
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_cluster`;
CREATE TABLE `t_ds_cluster` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`code` bigint(20) DEFAULT NULL COMMENT 'encoding',
`name` varchar(100) NOT NULL COMMENT 'cluster name',
`config` text NULL DEFAULT NULL COMMENT 'cluster config, which may contain multiple component configs such as k8s',
`description` text NULL DEFAULT NULL COMMENT 'the details',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `cluster_name_unique` (`name`),
UNIQUE KEY `cluster_code_unique` (`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

20
dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_postgresql.sql

@ -1938,3 +1938,23 @@ CREATE TABLE t_ds_alert_send_status (
PRIMARY KEY (id),
CONSTRAINT alert_send_status_unique UNIQUE (alert_id,alert_plugin_instance_id)
);
--
-- Table structure for table t_ds_cluster
--
DROP TABLE IF EXISTS t_ds_cluster;
CREATE TABLE t_ds_cluster (
id serial NOT NULL,
code bigint NOT NULL,
name varchar(100) DEFAULT NULL,
config text DEFAULT NULL,
description text,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id),
CONSTRAINT cluster_name_unique UNIQUE (name),
CONSTRAINT cluster_code_unique UNIQUE (code)
);

20
dolphinscheduler-dao/src/main/resources/sql/upgrade/3.0.0_schema/mysql/dolphinscheduler_ddl.sql

@ -280,4 +280,22 @@ CREATE TABLE `t_ds_relation_namespace_user` (
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`),
UNIQUE KEY `namespace_user_unique` (`user_id`,`namespace_id`)
) ENGINE=InnoDB AUTO_INCREMENT= 1 DEFAULT CHARSET= utf8;
-- ----------------------------
-- Table structure for t_ds_cluster
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_cluster`;
CREATE TABLE `t_ds_cluster` (
`id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
`code` bigint(20) DEFAULT NULL COMMENT 'encoding',
`name` varchar(100) NOT NULL COMMENT 'cluster name',
`config` text NULL DEFAULT NULL COMMENT 'cluster config, which may contain multiple component configs such as k8s',
`description` text NULL DEFAULT NULL COMMENT 'the details',
`operator` int(11) DEFAULT NULL COMMENT 'operator user id',
`create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY `cluster_name_unique` (`name`),
UNIQUE KEY `cluster_code_unique` (`code`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

14
dolphinscheduler-dao/src/main/resources/sql/upgrade/3.0.0_schema/postgresql/dolphinscheduler_ddl.sql

@ -249,6 +249,20 @@ EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_relation
CONSTRAINT namespace_user_unique UNIQUE (user_id,namespace_id)
)';
EXECUTE 'CREATE TABLE IF NOT EXISTS '|| quote_ident(v_schema) ||'."t_ds_cluster" (
id serial NOT NULL,
code bigint NOT NULL,
name varchar(100) DEFAULT NULL,
config text DEFAULT NULL,
description text,
operator int DEFAULT NULL,
create_time timestamp DEFAULT NULL,
update_time timestamp DEFAULT NULL,
PRIMARY KEY (id),
CONSTRAINT cluster_name_unique UNIQUE (name),
CONSTRAINT cluster_code_unique UNIQUE (code)
)';
return 'Success!';
exception when others then
---Raise EXCEPTION '(%)',SQLERRM;

191
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ClusterMapperTest.java

@ -0,0 +1,191 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.dao.BaseDaoTest;
import org.apache.dolphinscheduler.dao.entity.Cluster;
import java.util.Date;
import java.util.List;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
public class ClusterMapperTest extends BaseDaoTest {
@Autowired
ClusterMapper clusterMapper;
/**
* insert
*
* @return Cluster
*/
private Cluster insertOne() {
//insertOne
Cluster cluster = new Cluster();
cluster.setName("testCluster");
cluster.setCode(1L);
cluster.setOperator(1);
cluster.setConfig(getConfig());
cluster.setDescription(getDesc());
cluster.setCreateTime(new Date());
cluster.setUpdateTime(new Date());
clusterMapper.insert(cluster);
return cluster;
}
@Before
public void setUp() {
clearTestData();
}
@After
public void after() {
clearTestData();
}
public void clearTestData() {
clusterMapper.queryAllClusterList().stream().forEach(cluster -> {
clusterMapper.deleteByCode(cluster.getCode());
});
}
/**
* test update
*/
@Test
public void testUpdate() {
//insertOne
Cluster cluster = insertOne();
cluster.setDescription("new description info");
//update
int update = clusterMapper.updateById(cluster);
Assert.assertEquals(update, 1);
}
/**
* test delete
*/
@Test
public void testDelete() {
Cluster cluster = insertOne();
int delete = clusterMapper.deleteById(cluster.getId());
Assert.assertEquals(delete, 1);
}
/**
* test query
*/
@Test
public void testQuery() {
insertOne();
//query
List<Cluster> clusters = clusterMapper.selectList(null);
Assert.assertEquals(clusters.size(), 1);
}
/**
* test query cluster by name
*/
@Test
public void testQueryByClusterName() {
Cluster entity = insertOne();
Cluster cluster = clusterMapper.queryByClusterName(entity.getName());
Assert.assertEquals(entity.toString(),cluster.toString());
}
/**
* test query cluster by code
*/
@Test
public void testQueryByClusterCode() {
Cluster entity = insertOne();
Cluster cluster = clusterMapper.queryByClusterCode(entity.getCode());
Assert.assertEquals(entity.toString(),cluster.toString());
}
/**
* test query all clusters
*/
@Test
public void testQueryAllClusterList() {
Cluster entity = insertOne();
List<Cluster> clusters = clusterMapper.queryAllClusterList();
Assert.assertEquals(clusters.size(), 1);
Assert.assertEquals(entity.toString(),clusters.get(0).toString());
}
/**
* test query cluster list paging
*/
@Test
public void testQueryClusterListPaging() {
Cluster entity = insertOne();
Page<Cluster> page = new Page<>(1, 10);
IPage<Cluster> clusterIPage = clusterMapper.queryClusterListPaging(page,"");
List<Cluster> clusterList = clusterIPage.getRecords();
Assert.assertEquals(clusterList.size(), 1);
clusterIPage = clusterMapper.queryClusterListPaging(page,"abc");
clusterList = clusterIPage.getRecords();
Assert.assertEquals(clusterList.size(), 0);
}
/**
* test delete cluster by code
*/
@Test
public void testDeleteByCode() {
Cluster entity = insertOne();
int delete = clusterMapper.deleteByCode(entity.getCode());
Assert.assertEquals(delete, 1);
}
private String getDesc() {
return "create a cluster to test ";
}
/**
* create a cluster config
*/
private String getConfig() {
return "export HADOOP_HOME=/opt/hadoop-2.6.5\n"
+ "export HADOOP_CONF_DIR=/etc/hadoop/conf\n"
+ "export SPARK_HOME1=/opt/soft/spark1\n"
+ "export SPARK_HOME2=/opt/soft/spark2\n"
+ "export PYTHON_HOME=/opt/soft/python\n"
+ "export JAVA_HOME=/opt/java/jdk1.8.0_181-amd64\n"
+ "export HIVE_HOME=/opt/soft/hive\n"
+ "export FLINK_HOME=/opt/soft/flink\n"
+ "export DATAX_HOME=/opt/soft/datax\n"
+ "export YARN_CONF_DIR=\"/etc/hadoop/conf\"\n"
+ "\n"
+ "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME/bin:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$DATAX_HOME/bin:$PATH\n"
+ "\n"
+ "export HADOOP_CLASSPATH=`hadoop classpath`\n"
+ "\n"
+ "#echo \"HADOOP_CLASSPATH=\"$HADOOP_CLASSPATH";
}
}

123
dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/java/org/apache/dolphinscheduler/e2e/cases/ClusterE2ETest.java

@ -0,0 +1,123 @@
/*
* Licensed to Apache Software Foundation (ASF) under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Apache Software Foundation (ASF) licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.dolphinscheduler.e2e.cases;
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
import org.apache.dolphinscheduler.e2e.core.DolphinScheduler;
import org.apache.dolphinscheduler.e2e.pages.LoginPage;
import org.apache.dolphinscheduler.e2e.pages.security.ClusterPage;
import org.apache.dolphinscheduler.e2e.pages.security.SecurityPage;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.openqa.selenium.By;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.remote.RemoteWebDriver;
@DolphinScheduler(composeFiles = "docker/basic/docker-compose.yaml")
class ClusterE2ETest {
private static final String clusterName = "test_cluster_name";
private static final String clusterConfig = "test_cluster_config";
private static final String clusterDesc = "test_cluster_desc";
private static final String editClusterName = "edit_cluster_name";
private static final String editClusterConfig = "edit_cluster_config";
private static final String editClusterDesc = "edit_cluster_desc";
private static RemoteWebDriver browser;
@BeforeAll
public static void setup() {
new LoginPage(browser)
.login("admin", "dolphinscheduler123")
.goToNav(SecurityPage.class)
.goToTab(ClusterPage.class)
;
}
@Test
@Order(10)
void testCreateCluster() {
final ClusterPage page = new ClusterPage(browser);
page.create(clusterName, clusterConfig, clusterDesc);
await().untilAsserted(() -> {
browser.navigate().refresh();
assertThat(page.clusterList())
.as("Cluster list should contain newly-created cluster")
.extracting(WebElement::getText)
.anyMatch(it -> it.contains(clusterName));
});
}
@Test
@Order(20)
void testCreateDuplicateCluster() {
final ClusterPage page = new ClusterPage(browser);
page.create(clusterName, clusterConfig, clusterDesc);
await().untilAsserted(() ->
assertThat(browser.findElement(By.tagName("body")).getText())
.contains("already exists")
);
page.createClusterForm().buttonCancel().click();
}
@Test
@Order(30)
void testEditCluster() {
final ClusterPage page = new ClusterPage(browser);
page.update(clusterName, editClusterName, editClusterConfig, editClusterDesc);
await().untilAsserted(() -> {
browser.navigate().refresh();
assertThat(page.clusterList())
.as("Cluster list should contain newly-modified cluster")
.extracting(WebElement::getText)
.anyMatch(it -> it.contains(editClusterName));
});
}
@Test
@Order(40)
void testDeleteCluster() {
final ClusterPage page = new ClusterPage(browser);
page.delete(editClusterName);
await().untilAsserted(() -> {
browser.navigate().refresh();
assertThat(
page.clusterList()
)
.as("Cluster list should not contain deleted cluster")
.noneMatch(
it -> it.getText().contains(clusterName) || it.getText().contains(editClusterName)
);
});
}
}

151
dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/java/org/apache/dolphinscheduler/e2e/pages/security/ClusterPage.java

@ -0,0 +1,151 @@
/*
* Licensed to Apache Software Foundation (ASF) under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Apache Software Foundation (ASF) licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.dolphinscheduler.e2e.pages.security;
import org.apache.dolphinscheduler.e2e.pages.common.NavBarPage;
import java.util.List;
import org.openqa.selenium.By;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.Keys;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.openqa.selenium.support.FindBy;
import org.openqa.selenium.support.FindBys;
import org.openqa.selenium.support.PageFactory;
import org.openqa.selenium.support.ui.ExpectedConditions;
import org.openqa.selenium.support.ui.WebDriverWait;
import lombok.Getter;
@Getter
public final class ClusterPage extends NavBarPage implements SecurityPage.Tab {
@FindBy(className = "btn-create-cluster")
private WebElement buttonCreateCluster;
@FindBy(className = "items")
private List<WebElement> clusterList;
@FindBys({
@FindBy(className = "n-popconfirm__action"),
@FindBy(className = "n-button--primary-type"),
})
private WebElement buttonConfirm;
private final ClusterForm createClusterForm;
private final ClusterForm editClusterForm;
public ClusterPage(RemoteWebDriver driver) {
super(driver);
createClusterForm = new ClusterForm();
editClusterForm = new ClusterForm();
}
public ClusterPage create(String name, String config, String desc) {
buttonCreateCluster().click();
createClusterForm().inputClusterName().sendKeys(name);
createClusterForm().inputClusterConfig().sendKeys(config);
createClusterForm().inputClusterDesc().sendKeys(desc);
createClusterForm().buttonSubmit().click();
return this;
}
public ClusterPage update(String oldName, String name, String config, String desc) {
clusterList()
.stream()
.filter(it -> it.findElement(By.className("cluster-name")).getAttribute("innerHTML").contains(oldName))
.flatMap(it -> it.findElements(By.className("edit")).stream())
.filter(WebElement::isDisplayed)
.findFirst()
.orElseThrow(() -> new RuntimeException("No edit button in cluster list"))
.click();
editClusterForm().inputClusterName().sendKeys(Keys.CONTROL + "a");
editClusterForm().inputClusterName().sendKeys(Keys.BACK_SPACE);
editClusterForm().inputClusterName().sendKeys(name);
editClusterForm().inputClusterConfig().sendKeys(Keys.CONTROL + "a");
editClusterForm().inputClusterConfig().sendKeys(Keys.BACK_SPACE);
editClusterForm().inputClusterConfig().sendKeys(config);
editClusterForm().inputClusterDesc().sendKeys(Keys.CONTROL + "a");
editClusterForm().inputClusterDesc().sendKeys(Keys.BACK_SPACE);
editClusterForm().inputClusterDesc().sendKeys(desc);
editClusterForm().buttonSubmit().click();
return this;
}
public ClusterPage delete(String name) {
clusterList()
.stream()
.filter(it -> it.getText().contains(name))
.flatMap(it -> it.findElements(By.className("delete")).stream())
.filter(WebElement::isDisplayed)
.findFirst()
.orElseThrow(() -> new RuntimeException("No delete button in cluster list"))
.click();
((JavascriptExecutor) driver).executeScript("arguments[0].click();", buttonConfirm());
return this;
}
@Getter
public class ClusterForm {
ClusterForm() {
PageFactory.initElements(driver, this);
}
@FindBys({
@FindBy(className = "input-cluster-name"),
@FindBy(tagName = "input"),
})
private WebElement inputClusterName;
@FindBys({
@FindBy(className = "input-cluster-config"),
@FindBy(tagName = "textarea"),
})
private WebElement inputClusterConfig;
@FindBys({
@FindBy(className = "input-cluster-desc"),
@FindBy(tagName = "input"),
})
private WebElement inputClusterDesc;
@FindBys({
@FindBy(className = "n-base-selection-tags"),
@FindBy(className = "n-tag__content"),
})
private WebElement selectedWorkerGroup;
@FindBy(className = "btn-submit")
private WebElement buttonSubmit;
@FindBy(className = "btn-cancel")
private WebElement buttonCancel;
}
}

12
dolphinscheduler-e2e/dolphinscheduler-e2e-case/src/test/java/org/apache/dolphinscheduler/e2e/pages/security/SecurityPage.java

@ -52,9 +52,12 @@ public class SecurityPage extends NavBarPage implements NavBarItem {
private WebElement menuEnvironmentManage;
@FindBy(css = ".tab-vertical > .n-menu-item:nth-child(8) > .n-menu-item-content")
private WebElement menuNamespaceManage;
private WebElement menuClusterManage;
@FindBy(css = ".tab-vertical > .n-menu-item:nth-child(9) > .n-menu-item-content")
private WebElement menuNamespaceManage;
@FindBy(css = ".tab-vertical > .n-menu-item:nth-child(10) > .n-menu-item-content")
private WebElement menuTokenManage;
public SecurityPage(RemoteWebDriver driver) {
@ -97,6 +100,13 @@ public class SecurityPage extends NavBarPage implements NavBarItem {
return tab.cast(new EnvironmentPage(driver));
}
if (tab == ClusterPage.class) {
new WebDriverWait(driver, 10).until(ExpectedConditions.urlContains("/security"));
new WebDriverWait(driver, 60).until(ExpectedConditions.elementToBeClickable(menuClusterManage));
((JavascriptExecutor) driver).executeScript("arguments[0].click();", menuClusterManage());
return tab.cast(new ClusterPage(driver));
}
if (tab == TokenPage.class) {
new WebDriverWait(driver, 10).until(ExpectedConditions.urlContains("/security"));
new WebDriverWait(driver, 60).until(ExpectedConditions.elementToBeClickable(menuTokenManage));

8
dolphinscheduler-ui/src/layouts/content/use-dataList.ts

@ -46,7 +46,8 @@ import {
ContainerOutlined,
ApartmentOutlined,
BarsOutlined,
CloudServerOutlined
CloudServerOutlined,
ClusterOutlined
} from '@vicons/antd'
import { useRoute } from 'vue-router'
import { useUserStore } from '@/store/user/user'
@ -290,6 +291,11 @@ export function useDataList() {
key: '/security/environment-manage',
icon: renderIcon(EnvironmentOutlined)
},
{
label: t('menu.cluster_manage'),
key: '/security/cluster-manage',
icon: renderIcon(ClusterOutlined)
},
{
label: t('menu.k8s_namespace_manage'),
key: '/security/k8s-namespace-manage',

1
dolphinscheduler-ui/src/locales/en_US/menu.ts

@ -48,6 +48,7 @@ export default {
worker_group_manage: 'Worker Group Manage',
yarn_queue_manage: 'Yarn Queue Manage',
environment_manage: 'Environment Manage',
cluster_manage: 'Cluster Manage',
k8s_namespace_manage: 'K8S Namespace Manage',
token_manage: 'Token Manage',
task_group_manage: 'Task Group Manage',

32
dolphinscheduler-ui/src/locales/en_US/project.ts

@ -193,9 +193,9 @@ export default {
project_name: 'Project Name',
project_tips: 'Please select project name',
workflow_relation_no_data_result_title:
'Can not find any relations of workflows.',
workflow_relation_no_data_result_desc:
'There are no workflows. Please create a workflow, and then visit this page again.'
},
task: {
cancel_full_screen: 'Cancel full screen',
@ -308,7 +308,7 @@ export default {
task_priority: 'Task priority',
worker_group: 'Worker group',
worker_group_tips:
'The Worker group no longer exists, please select the correct Worker group!',
environment_name: 'Environment Name',
task_group_name: 'Task group name',
task_group_queue_priority: 'Priority',
@ -335,11 +335,11 @@ export default {
success: 'Success',
failed: 'Failed',
backfill_tips:
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process',
task_instance_tips:
'The task has not been executed and cannot enter the sub-Process',
branch_tips:
'Cannot select the same node for successful branch flow and failed branch flow',
timeout_alarm: 'Timeout alarm',
timeout_strategy: 'Timeout strategy',
timeout_strategy_tips: 'Timeout strategy must be selected',
@ -398,8 +398,8 @@ export default {
parallelism_tips: 'Please enter Parallelism',
parallelism_number_tips: 'Parallelism number should be positive integer',
parallelism_complement_tips:
'If there are a large number of tasks requiring complement, you can use the custom parallelism to ' +
'set the complement task thread to a reasonable value to avoid too large impact on the server.',
task_manager_number: 'TaskManager Number',
task_manager_number_tips: 'Please enter TaskManager number',
http_url: 'Http Url',
@ -434,7 +434,7 @@ export default {
procedure_method: 'SQL Statement',
procedure_method_tips: 'Please enter the procedure script',
procedure_method_snippet:
'--Please enter the procedure script \n\n--call procedure:call <procedure-name>[(<arg1>,<arg2>, ...)]\n\n--call function:?= call <procedure-name>[(<arg1>,<arg2>, ...)]',
start: 'Start',
edit: 'Edit',
copy: 'Copy',
@ -500,7 +500,7 @@ export default {
sea_tunnel_master_url: 'Master URL',
sea_tunnel_queue: 'Queue',
sea_tunnel_master_url_tips:
'Please enter the master url, e.g., 127.0.0.1:7077',
add_pre_task_check_condition: 'Add pre task check condition',
switch_condition: 'Condition',
switch_branch_flow: 'Branch Flow',
@ -615,24 +615,24 @@ export default {
'Please enter the paragraph id of your zeppelin paragraph',
jupyter_conda_env_name: 'condaEnvName',
jupyter_conda_env_name_tips:
'Please enter the conda environment name of papermill',
jupyter_input_note_path: 'inputNotePath',
jupyter_input_note_path_tips: 'Please enter the input jupyter note path',
jupyter_output_note_path: 'outputNotePath',
jupyter_output_note_path_tips: 'Please enter the output jupyter note path',
jupyter_parameters: 'parameters',
jupyter_parameters_tips:
'Please enter the parameters for jupyter parameterization',
jupyter_kernel: 'kernel',
jupyter_kernel_tips: 'Please enter the jupyter kernel name',
jupyter_engine: 'engine',
jupyter_engine_tips: 'Please enter the engine name',
jupyter_execution_timeout: 'executionTimeout',
jupyter_execution_timeout_tips:
'Please enter the execution timeout for each jupyter note cell',
jupyter_start_timeout: 'startTimeout',
jupyter_start_timeout_tips:
'Please enter the start timeout for jupyter kernel',
jupyter_others: 'others',
jupyter_others_tips:
'Please enter the other options you need for papermill',
@ -645,7 +645,7 @@ export default {
mlflow_isSearchParams: 'Search Parameters',
mlflow_dataPath: 'Data Path',
mlflow_dataPath_tips:
' The absolute path of the file or folder. Ends with .csv for file or contain train.csv and test.csv for folder',
mlflow_dataPath_error_tips: ' data path can not be empty ',
mlflow_experimentName: 'Experiment Name',
mlflow_experimentName_tips: 'experiment_001',
@ -698,4 +698,4 @@ export default {
'Please enter threshold number is needed',
please_enter_comparison_title: 'please select comparison title'
}
}

20
dolphinscheduler-ui/src/locales/en_US/security.ts

@ -98,6 +98,26 @@ export default {
environment_description_tips: 'Please enter your environment description',
worker_group_tips: 'Please select worker group'
},
cluster: {
create_cluster: 'Create Cluster',
edit_cluster: 'Edit Cluster',
search_tips: 'Please enter keywords',
edit: 'Edit',
delete: 'Delete',
cluster_name: 'Cluster Name',
cluster_components: 'Cluster Components',
cluster_config: 'Cluster Config',
kubernetes_config: 'Kubernetes Config',
yarn_config: 'Yarn Config',
cluster_desc: 'Cluster Desc',
create_time: 'Create Time',
update_time: 'Update Time',
operation: 'Operation',
delete_confirm: 'Delete?',
cluster_name_tips: 'Please enter your cluster name',
cluster_config_tips: 'Please enter your cluster config',
cluster_description_tips: 'Please enter your cluster description'
},
token: {
create_token: 'Create Token',
edit_token: 'Edit Token',

1
dolphinscheduler-ui/src/locales/zh_CN/menu.ts

@ -48,6 +48,7 @@ export default {
worker_group_manage: 'Worker分组管理',
yarn_queue_manage: 'Yarn队列管理',
environment_manage: '环境管理',
cluster_manage: '集群管理',
k8s_namespace_manage: 'K8S命名空间管理',
token_manage: '令牌管理',
task_group_manage: '任务组管理',

8
dolphinscheduler-ui/src/locales/zh_CN/project.ts

@ -195,7 +195,7 @@ export default {
project_tips: '请选择项目',
workflow_relation_no_data_result_title: '工作流关系不存在',
workflow_relation_no_data_result_desc:
'目前没有任何工作流,请先创建工作流,再访问该页面'
},
task: {
cancel_full_screen: '取消全屏',
@ -393,7 +393,7 @@ export default {
parallelism_tips: '请输入并行度',
parallelism_number_tips: '并行度必须为正整数',
parallelism_complement_tips:
'如果存在大量任务需要补数时,可以利用自定义并行度将补数的任务线程设置成合理的数值,避免对服务器造成过大的影响',
task_manager_number: 'TaskManager数量',
task_manager_number_tips: '请输入TaskManager数量',
http_url: '请求地址',
@ -428,7 +428,7 @@ export default {
procedure_method: 'SQL语句',
procedure_method_tips: '请输入存储脚本',
procedure_method_snippet:
'--请输入存储脚本 \n\n--调用存储过程: call <procedure-name>[(<arg1>,<arg2>, ...)] \n\n--调用存储函数:?= call <procedure-name>[(<arg1>,<arg2>, ...)]',
start: '运行',
edit: '编辑',
copy: '复制节点',
@ -634,7 +634,7 @@ export default {
mlflow_isSearchParams: '是否搜索参数',
mlflow_dataPath: '数据路径',
mlflow_dataPath_tips:
' 文件/文件夹的绝对路径, 若文件需以.csv结尾, 文件夹需包含train.csv和test.csv ',
mlflow_dataPath_error_tips: ' 数据路径不能为空 ',
mlflow_experimentName: '实验名称',
mlflow_experimentName_tips: 'experiment_001',

20
dolphinscheduler-ui/src/locales/zh_CN/security.ts

@ -98,6 +98,26 @@ export default {
environment_description_tips: '请输入环境描述',
worker_group_tips: '请选择Worker分组'
},
cluster: {
create_cluster: '创建集群',
edit_cluster: '编辑集群',
search_tips: '请输入关键词',
edit: '编辑',
delete: '删除',
cluster_name: '集群名称',
cluster_components: '集群模块',
cluster_config: '集群配置',
kubernetes_config: 'Kubernetes配置',
yarn_config: 'Yarn配置',
cluster_desc: '集群描述',
create_time: '创建时间',
update_time: '更新时间',
operation: '操作',
delete_confirm: '确定删除吗?',
cluster_name_tips: '请输入集群名',
cluster_config_tips: '请输入集群配置',
cluster_description_tips: '请输入集群描述'
},
token: {
create_token: '创建令牌',
edit_token: '编辑令牌',

11
dolphinscheduler-ui/src/router/modules/security.ts

@ -95,6 +95,17 @@ export default {
auth: ['ADMIN_USER']
}
},
{
path: '/security/cluster-manage',
name: 'cluster-manage',
component: components['security-cluster-manage'],
meta: {
title: '集群管理',
activeMenu: 'security',
showSide: true,
auth: ['ADMIN_USER']
}
},
{
path: '/security/token-manage',
name: 'token-manage',

80
dolphinscheduler-ui/src/service/modules/cluster/index.ts

@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { axios } from '@/service/service'
import {
ClusterReq,
ClusterCodeReq,
ClusterNameReq,
ListReq,
CodeReq
} from './types'
export function createCluster(data: ClusterReq): any {
return axios({
url: '/cluster/create',
method: 'post',
data
})
}
export function deleteClusterByCode(data: ClusterCodeReq): any {
return axios({
url: '/cluster/delete',
method: 'post',
data
})
}
export function queryClusterListPaging(params: ListReq): any {
return axios({
url: '/cluster/list-paging',
method: 'get',
params
})
}
export function queryClusterByCode(params: ClusterCodeReq): any {
return axios({
url: '/cluster/query-by-code',
method: 'get',
params
})
}
export function queryAllClusterList(): any {
return axios({
url: '/cluster/query-cluster-list',
method: 'get'
})
}
export function updateCluster(data: ClusterReq & CodeReq): any {
return axios({
url: '/cluster/update',
method: 'post',
data
})
}
export function verifyCluster(data: ClusterNameReq): any {
return axios({
url: '/cluster/verify-cluster',
method: 'post',
data
})
}

72
dolphinscheduler-ui/src/service/modules/cluster/types.ts

@ -0,0 +1,72 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
interface ClusterReq {
config: string
name: string
description?: string
workerGroups?: string
}
interface ClusterCodeReq {
clusterCode: number
}
interface ClusterNameReq {
clusterName: string
}
interface ListReq {
pageNo: number
pageSize: number
searchVal?: string
}
interface CodeReq {
code: number
}
interface ClusterItem {
id: number
code: any
name: string
config: string
description: string
workerGroups: string[]
operator: number
createTime: string
updateTime: string
}
interface ClusterRes {
totalList: ClusterItem[]
total: number
totalPage: number
pageSize: number
currentPage: number
start: number
}
export {
ClusterReq,
ClusterCodeReq,
ClusterNameReq,
ListReq,
CodeReq,
ClusterItem,
ClusterRes
}

191
dolphinscheduler-ui/src/views/security/cluster-manage/components/cluster-modal.tsx

@ -0,0 +1,191 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { defineComponent, PropType, toRefs, watch } from 'vue'
import Modal from '@/components/modal'
import { NForm, NFormItem, NInput } from 'naive-ui'
import { useModal } from './use-modal'
import { useI18n } from 'vue-i18n'
const envK8sConfigPlaceholder = `apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CZJQ0FURS0tLS0tCg==
server: https://127.0.0.1:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: LS0tLS1CZJQ0FURS0tLS0tCg=
`
const envYarnConfigPlaceholder = 'In development...'
const ClusterModal = defineComponent({
name: 'ClusterModal',
props: {
showModalRef: {
type: Boolean as PropType<boolean>,
default: false
},
statusRef: {
type: Number as PropType<number>,
default: 0
},
row: {
type: Object as PropType<any>,
default: {}
}
},
emits: ['cancelModal', 'confirmModal'],
setup(props, ctx) {
const { variables, handleValidate } = useModal(props, ctx)
const { t } = useI18n()
const cancelModal = () => {
if (props.statusRef === 0) {
variables.model.name = ''
variables.model.k8s_config = ''
variables.model.yarn_config = ''
variables.model.description = ''
}
ctx.emit('cancelModal', props.showModalRef)
}
const confirmModal = () => {
handleValidate(props.statusRef)
}
const setModal = (row: any) => {
variables.model.code = row.code
variables.model.name = row.name
if (row.config) {
const config = JSON.parse(row.config)
variables.model.k8s_config = config.k8s || ''
variables.model.yarn_config = config.yarn || ''
} else {
variables.model.k8s_config = ''
variables.model.yarn_config = ''
}
variables.model.description = row.description
}
watch(
() => props.statusRef,
() => {
if (props.statusRef === 0) {
variables.model.name = ''
variables.model.k8s_config = ''
variables.model.yarn_config = ''
variables.model.description = ''
} else {
setModal(props.row)
}
}
)
watch(
() => props.row,
() => {
setModal(props.row)
}
)
return { t, ...toRefs(variables), cancelModal, confirmModal }
},
render() {
const { t } = this
return (
<div>
<Modal
title={
this.statusRef === 0
? t('security.cluster.create_cluster')
: t('security.cluster.edit_cluster')
}
show={this.showModalRef}
onCancel={this.cancelModal}
onConfirm={this.confirmModal}
confirmDisabled={!this.model.name || !this.model.description}
confirmClassName='btn-submit'
cancelClassName='btn-cancel'
confirmLoading={this.saving}
>
{{
default: () => (
<NForm model={this.model} rules={this.rules} ref='clusterFormRef'>
<NFormItem
label={t('security.cluster.cluster_name')}
path='name'
>
<NInput
class='input-cluster-name'
placeholder={t('security.cluster.cluster_name_tips')}
v-model={[this.model.name, 'value']}
/>
</NFormItem>
<NFormItem
label={t('security.cluster.kubernetes_config')}
path='k8s_config'
>
<NInput
class='input-cluster-config'
placeholder={envK8sConfigPlaceholder}
type='textarea'
autosize={{ minRows: 16 }}
v-model={[this.model.k8s_config, 'value']}
/>
</NFormItem>
<NFormItem
label={t('security.cluster.yarn_config')}
path='yarn_config'
>
<NInput
class='input-yarn-config'
placeholder={envYarnConfigPlaceholder}
disabled={true}
v-model={[this.model.yarn_config, 'value']}
/>
</NFormItem>
<NFormItem
label={t('security.cluster.cluster_desc')}
path='description'
>
<NInput
class='input-cluster-desc'
placeholder={t('security.cluster.cluster_description_tips')}
v-model={[this.model.description, 'value']}
/>
</NFormItem>
</NForm>
)
}}
</Modal>
</div>
)
}
})
export default ClusterModal

118
dolphinscheduler-ui/src/views/security/cluster-manage/components/use-modal.ts

@ -0,0 +1,118 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { reactive, ref, SetupContext } from 'vue'
import { useI18n } from 'vue-i18n'
import {
verifyCluster,
createCluster,
updateCluster
} from '@/service/modules/cluster'
export function useModal(
props: any,
ctx: SetupContext<('cancelModal' | 'confirmModal')[]>
) {
const { t } = useI18n()
const variables = reactive({
clusterFormRef: ref(),
model: {
code: ref<number>(-1),
name: ref(''),
k8s_config: ref(''),
yarn_config: ref(''),
description: ref('')
},
saving: false,
rules: {
name: {
required: true,
trigger: ['input', 'blur'],
validator() {
if (variables.model.name === '') {
return new Error(t('security.cluster.cluster_name_tips'))
}
}
},
description: {
required: true,
trigger: ['input', 'blur'],
validator() {
if (variables.model.description === '') {
return new Error(t('security.cluster.cluster_description_tips'))
}
}
}
}
})
const handleValidate = async (statusRef: number) => {
await variables.clusterFormRef.validate()
if (variables.saving) return
variables.saving = true
try {
statusRef === 0 ? await submitClusterModal() : await updateClusterModal()
} finally {
variables.saving = false
}
}
const submitClusterModal = () => {
verifyCluster({ clusterName: variables.model.name }).then(() => {
const data = {
name: variables.model.name,
config: JSON.stringify({
k8s: variables.model.k8s_config,
yarn: variables.model.yarn_config
}),
description: variables.model.description
}
createCluster(data).then(() => {
variables.model.name = ''
variables.model.k8s_config = ''
variables.model.yarn_config = ''
variables.model.description = ''
ctx.emit('confirmModal', props.showModalRef)
})
})
}
const updateClusterModal = () => {
const data = {
code: variables.model.code,
name: variables.model.name,
config: JSON.stringify({
k8s: variables.model.k8s_config,
yarn: variables.model.yarn_config
}),
description: variables.model.description
}
updateCluster(data).then(() => {
ctx.emit('confirmModal', props.showModalRef)
})
}
return {
variables,
handleValidate
}
}

170
dolphinscheduler-ui/src/views/security/cluster-manage/index.tsx

@ -0,0 +1,170 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { defineComponent, onMounted, toRefs, watch } from 'vue'
import {
NButton,
NCard,
NDataTable,
NIcon,
NInput,
NPagination,
NSpace
} from 'naive-ui'
import { SearchOutlined } from '@vicons/antd'
import { useI18n } from 'vue-i18n'
import { useTable } from './use-table'
import Card from '@/components/card'
import ClusterModal from './components/cluster-modal'
const clusterManage = defineComponent({
name: 'cluster-manage',
setup() {
const { t } = useI18n()
const { variables, getTableData, createColumns } = useTable()
const requestData = () => {
getTableData({
pageSize: variables.pageSize,
pageNo: variables.page,
searchVal: variables.searchVal
})
}
const onUpdatePageSize = () => {
variables.page = 1
requestData()
}
const onSearch = () => {
variables.page = 1
requestData()
}
const handleModalChange = () => {
variables.showModalRef = true
variables.statusRef = 0
}
const onCancelModal = () => {
variables.showModalRef = false
}
const onConfirmModal = () => {
variables.showModalRef = false
requestData()
}
onMounted(() => {
createColumns(variables)
requestData()
})
watch(useI18n().locale, () => {
createColumns(variables)
})
return {
t,
...toRefs(variables),
requestData,
onCancelModal,
onConfirmModal,
onUpdatePageSize,
handleModalChange,
onSearch
}
},
render() {
const {
t,
requestData,
onUpdatePageSize,
onCancelModal,
onConfirmModal,
handleModalChange,
onSearch,
loadingRef
} = this
return (
<NSpace vertical>
<NCard>
<NSpace justify='space-between'>
<NButton
size='small'
type='primary'
onClick={handleModalChange}
class='btn-create-cluster'
>
{t('security.cluster.create_cluster')}
</NButton>
<NSpace>
<NInput
size='small'
clearable
v-model={[this.searchVal, 'value']}
placeholder={t('security.cluster.search_tips')}
/>
<NButton size='small' type='primary' onClick={onSearch}>
{{
icon: () => (
<NIcon>
<SearchOutlined />
</NIcon>
)
}}
</NButton>
</NSpace>
</NSpace>
</NCard>
<Card>
<NSpace vertical>
<NDataTable
loading={loadingRef}
row-class-name='items'
columns={this.columns}
data={this.tableData}
scrollX={this.tableWidth}
/>
<NSpace justify='center'>
<NPagination
v-model:page={this.page}
v-model:page-size={this.pageSize}
page-count={this.totalPage}
show-size-picker
page-sizes={[10, 30, 50]}
show-quick-jumper
onUpdatePage={requestData}
onUpdatePageSize={onUpdatePageSize}
/>
</NSpace>
</NSpace>
</Card>
<ClusterModal
showModalRef={this.showModalRef}
statusRef={this.statusRef}
row={this.row}
onCancelModal={onCancelModal}
onConfirmModal={onConfirmModal}
/>
</NSpace>
)
}
})
export default clusterManage

236
dolphinscheduler-ui/src/views/security/cluster-manage/use-table.ts

@ -0,0 +1,236 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { useAsyncState } from '@vueuse/core'
import { reactive, h, ref } from 'vue'
import { format } from 'date-fns'
import { NButton, NPopconfirm, NSpace, NTooltip, NTag, NIcon } from 'naive-ui'
import { useI18n } from 'vue-i18n'
import {
queryClusterListPaging,
deleteClusterByCode
} from '@/service/modules/cluster'
import { DeleteOutlined, EditOutlined } from '@vicons/antd'
import type { ClusterRes, ClusterItem } from '@/service/modules/cluster/types'
import { parseTime } from '@/common/common'
import {
COLUMN_WIDTH_CONFIG,
calculateTableWidth,
DefaultTableWidth
} from '@/common/column-width-config'
export function useTable() {
const { t } = useI18n()
const handleEdit = (row: any) => {
variables.showModalRef = true
variables.statusRef = 1
variables.row = row
}
const createColumns = (variables: any) => {
variables.columns = [
{
title: '#',
key: 'index',
render: (row: any, index: number) => index + 1,
...COLUMN_WIDTH_CONFIG['index']
},
{
title: t('security.cluster.cluster_name'),
key: 'name',
className: 'cluster-name',
...COLUMN_WIDTH_CONFIG['name']
},
{
title: t('security.cluster.cluster_components'),
key: 'config',
...COLUMN_WIDTH_CONFIG['tag'],
render: (row: ClusterItem) =>
h(NSpace, null, {
default: () => {
const components = []
if (row.config) {
const config = JSON.parse(row.config)
if (config.yarn) {
components.push('yarn')
}
if (config.k8s) {
components.push('kubernetes')
}
}
return components.map((item: any) =>
h(
NTag,
{ type: 'success', size: 'small' },
{ default: () => item }
)
)
}
})
},
{
title: t('security.cluster.cluster_desc'),
key: 'description',
...COLUMN_WIDTH_CONFIG['note']
},
{
title: t('security.cluster.create_time'),
key: 'createTime',
...COLUMN_WIDTH_CONFIG['time']
},
{
title: t('security.cluster.update_time'),
key: 'updateTime',
...COLUMN_WIDTH_CONFIG['time']
},
{
title: t('security.cluster.operation'),
key: 'operation',
...COLUMN_WIDTH_CONFIG['operation'](2),
render(row: any) {
return h(NSpace, null, {
default: () => [
h(
NTooltip,
{},
{
trigger: () =>
h(
NButton,
{
circle: true,
type: 'info',
size: 'small',
class: 'edit',
onClick: () => {
handleEdit(row)
}
},
{
icon: () =>
h(NIcon, null, { default: () => h(EditOutlined) })
}
),
default: () => t('security.cluster.edit')
}
),
h(
NPopconfirm,
{
onPositiveClick: () => {
handleDelete(row)
}
},
{
trigger: () =>
h(
NTooltip,
{},
{
trigger: () =>
h(
NButton,
{
circle: true,
type: 'error',
size: 'small',
class: 'delete'
},
{
icon: () =>
h(NIcon, null, {
default: () => h(DeleteOutlined)
})
}
),
default: () => t('security.cluster.delete')
}
),
default: () => t('security.cluster.delete_confirm')
}
)
]
})
}
}
]
if (variables.tableWidth) {
variables.tableWidth = calculateTableWidth(variables.columns)
}
}
const variables = reactive({
columns: [],
tableWidth: DefaultTableWidth,
tableData: [],
page: ref(1),
pageSize: ref(10),
searchVal: ref(null),
totalPage: ref(1),
showModalRef: ref(false),
statusRef: ref(0),
row: {},
loadingRef: ref(false)
})
const handleDelete = (row: any) => {
deleteClusterByCode({ clusterCode: row.code }).then(() => {
getTableData({
pageSize: variables.pageSize,
pageNo:
variables.tableData.length === 1 && variables.page > 1
? variables.page - 1
: variables.page,
searchVal: variables.searchVal
})
})
}
const getTableData = (params: any) => {
if (variables.loadingRef) return
variables.loadingRef = true
const { state } = useAsyncState(
queryClusterListPaging({ ...params }).then((res: ClusterRes) => {
variables.tableData = res.totalList.map((item, unused) => {
item.createTime = format(
parseTime(item.createTime),
'yyyy-MM-dd HH:mm:ss'
)
item.updateTime = format(
parseTime(item.updateTime),
'yyyy-MM-dd HH:mm:ss'
)
return {
...item
}
}) as any
variables.totalPage = res.totalPage
variables.loadingRef = false
}),
{}
)
return state
}
return {
variables,
getTableData,
createColumns
}
}