// Rainbond/api/handler/cluster.go

package handler
import (
"fmt"
"runtime"
"time"
"github.com/sirupsen/logrus"
"github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/api/util"
"github.com/shirou/gopsutil/disk"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
)
// ClusterHandler defines cluster resource queries and maven setting management.
type ClusterHandler interface {
GetClusterInfo() (*model.ClusterResource, error)
MavenSettingAdd(ms *MavenSetting) *util.APIHandleError
MavenSettingList() (re []MavenSetting)
MavenSettingUpdate(ms *MavenSetting) *util.APIHandleError
MavenSettingDelete(name string) *util.APIHandleError
MavenSettingDetail(name string) (*MavenSetting, *util.APIHandleError)
}
// NewClusterHandler creates a ClusterHandler backed by the given kubernetes clientset.
// Maven settings are stored as ConfigMaps in the given Rainbond namespace.
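// Illustrative usage (the "rbd-system" namespace is only an example, not taken from this file):
//
//	h := NewClusterHandler(clientset, "rbd-system")
//	if info, err := h.GetClusterInfo(); err == nil {
//		logrus.Infof("cluster has %d nodes", info.AllNode)
//	}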
func NewClusterHandler(clientset *kubernetes.Clientset, RbdNamespace string) ClusterHandler {
return &clusterAction{
namespace: RbdNamespace,
clientset: clientset,
}
}
type clusterAction struct {
namespace string
clientset *kubernetes.Clientset
}
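// GetClusterInfo aggregates node allocatable capacity, pod resource requests and
// disk usage of the shared data directory into a model.ClusterResource.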
func (c *clusterAction) GetClusterInfo() (*model.ClusterResource, error) {
nodes, err := c.listNodes()
if err != nil {
return nil, fmt.Errorf("[GetClusterInfo] list nodes: %v", err)
}
var healthCapCPU, healthCapMem, unhealthCapCPU, unhealthCapMem int64
usedNodeList := make([]*corev1.Node, 0, len(nodes))
for i := range nodes {
node := nodes[i]
if !isNodeReady(node) {
logrus.Debugf("[GetClusterInfo] node(%s) not ready", node.GetName())
unhealthCapCPU += node.Status.Allocatable.Cpu().Value()
unhealthCapMem += node.Status.Allocatable.Memory().Value()
continue
}
healthCapCPU += node.Status.Allocatable.Cpu().Value()
healthCapMem += node.Status.Allocatable.Memory().Value()
if !node.Spec.Unschedulable {
usedNodeList = append(usedNodeList, node)
}
}
var healthcpuR, healthmemR, unhealthCPUR, unhealthMemR int64
nodeAllocatableResourceList := make(map[string]*model.NodeResource, len(usedNodeList))
var maxAllocatableMemory *model.NodeResource
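// Subtract each pod's container resource requests from its node's allocatable
// resources and accumulate the cluster-wide request totals.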
for i := range usedNodeList {
node := usedNodeList[i]
pods, err := c.listPods(node.Name)
if err != nil {
return nil, fmt.Errorf("list pods: %v", err)
}
nodeAllocatableResource := model.NewResource(node.Status.Allocatable)
for _, pod := range pods {
nodeAllocatableResource.AllowedPodNumber--
for _, container := range pod.Spec.Containers {
nodeAllocatableResource.Memory -= container.Resources.Requests.Memory().Value()
nodeAllocatableResource.MilliCPU -= container.Resources.Requests.Cpu().MilliValue()
nodeAllocatableResource.EphemeralStorage -= container.Resources.Requests.StorageEphemeral().Value()
if isNodeReady(node) {
healthcpuR += container.Resources.Requests.Cpu().MilliValue()
healthmemR += container.Resources.Requests.Memory().Value()
} else {
unhealthCPUR += container.Resources.Requests.Cpu().MilliValue()
unhealthMemR += container.Resources.Requests.Memory().Value()
}
}
}
nodeAllocatableResourceList[node.Name] = nodeAllocatableResource
// Gets the node resource with the maximum remaining scheduling memory
if maxAllocatableMemory == nil {
maxAllocatableMemory = nodeAllocatableResource
} else {
if nodeAllocatableResource.Memory > maxAllocatableMemory.Memory {
maxAllocatableMemory = nodeAllocatableResource
}
}
}
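// Sample disk usage of the shared data directory (/grdata on Linux, the z: mount on Windows).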
var diskStatus *disk.UsageStat
if runtime.GOOS != "windows" {
diskStatus, _ = disk.Usage("/grdata")
} else {
diskStatus, _ = disk.Usage(`z:\\`)
}
var diskCap, reqDisk uint64
if diskStatus != nil {
diskCap = diskStatus.Total
reqDisk = diskStatus.Used
}
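// Memory values are reported in MiB, CPU requests in cores (converted from millicores).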
result := &model.ClusterResource{
CapCPU: int(healthCapCPU + unhealthCapCPU),
CapMem: int(healthCapMem+unhealthCapMem) / 1024 / 1024,
HealthCapCPU: int(healthCapCPU),
HealthCapMem: int(healthCapMem) / 1024 / 1024,
UnhealthCapCPU: int(unhealthCapCPU),
UnhealthCapMem: int(unhealthCapMem) / 1024 / 1024,
ReqCPU: float32(healthcpuR+unhealthCPUR) / 1000,
ReqMem: int(healthmemR+unhealthMemR) / 1024 / 1024,
HealthReqCPU: float32(healthcpuR) / 1000,
HealthReqMem: int(healthmemR) / 1024 / 1024,
UnhealthReqCPU: float32(unhealthCPUR) / 1000,
UnhealthReqMem: int(unhealthMemR) / 1024 / 1024,
ComputeNode: len(nodes),
CapDisk: diskCap,
ReqDisk: reqDisk,
MaxAllocatableMemoryNodeResource: maxAllocatableMemory,
}
result.AllNode = len(nodes)
for _, node := range nodes {
if !isNodeReady(node) {
result.NotReadyNode++
}
}
return result, nil
}
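// listNodes lists all cluster nodes, skipping nodes that carry a NoSchedule taint.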
func (c *clusterAction) listNodes() ([]*corev1.Node, error) {
opts := metav1.ListOptions{}
nodeList, err := c.clientset.CoreV1().Nodes().List(opts)
if err != nil {
return nil, err
}
var nodes []*corev1.Node
for idx := range nodeList.Items {
node := &nodeList.Items[idx]
// check if node contains taints
if containsTaints(node) {
logrus.Debugf("[GetClusterInfo] node(%s) contains NoSchedule taints", node.GetName())
continue
}
nodes = append(nodes, node)
}
return nodes, nil
}
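// isNodeReady reports whether the node has a Ready condition with status True.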
func isNodeReady(node *corev1.Node) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue {
return true
}
}
return false
}
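// containsTaints reports whether the node has any taint with effect NoSchedule.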
func containsTaints(node *corev1.Node) bool {
for _, taint := range node.Spec.Taints {
if taint.Effect == corev1.TaintEffectNoSchedule {
return true
}
}
return false
}
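// listPods lists the pods scheduled onto the given node across all namespaces.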
func (c *clusterAction) listPods(nodeName string) (pods []corev1.Pod, err error) {
podList, err := c.clientset.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
if err != nil {
return pods, err
}
return podList.Items, nil
}
// MavenSetting represents a maven settings.xml configuration.
type MavenSetting struct {
Name string `json:"name" validate:"required"`
CreateTime string `json:"create_time"`
UpdateTime string `json:"update_time"`
Content string `json:"content" validate:"required"`
}
// MavenSettingList returns all maven settings stored in the namespace.
func (c *clusterAction) MavenSettingList() (re []MavenSetting) {
cms, err := c.clientset.CoreV1().ConfigMaps(c.namespace).List(metav1.ListOptions{
LabelSelector: "configtype=mavensetting",
})
if err != nil {
logrus.Errorf("list maven setting configmaps failure %s", err.Error())
return
}
for _, sm := range cms.Items {
re = append(re, MavenSetting{
Name: sm.Name,
CreateTime: sm.CreationTimestamp.Format(time.RFC3339),
UpdateTime: sm.Annotations["updateTime"],
Content: sm.Data["mavensetting"],
})
}
return
}
// MavenSettingAdd creates a new maven setting as a ConfigMap.
func (c *clusterAction) MavenSettingAdd(ms *MavenSetting) *util.APIHandleError {
config := &corev1.ConfigMap{}
config.Name = ms.Name
config.Namespace = c.namespace
config.Labels = map[string]string{
"creator": "Rainbond",
"configtype": "mavensetting",
}
config.Annotations = map[string]string{
"updateTime": time.Now().Format(time.RFC3339),
}
config.Data = map[string]string{
"mavensetting": ms.Content,
}
_, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Create(config)
if err != nil {
if apierrors.IsAlreadyExists(err) {
return &util.APIHandleError{Code: 400, Err: fmt.Errorf("setting name already exists")}
}
logrus.Errorf("create maven setting configmap failure %s", err.Error())
return &util.APIHandleError{Code: 500, Err: fmt.Errorf("create setting config failure")}
}
ms.CreateTime = time.Now().Format(time.RFC3339)
ms.UpdateTime = time.Now().Format(time.RFC3339)
return nil
}
// MavenSettingUpdate updates the content of an existing maven setting.
func (c *clusterAction) MavenSettingUpdate(ms *MavenSetting) *util.APIHandleError {
sm, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Get(ms.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return &util.APIHandleError{Code: 404, Err: fmt.Errorf("setting does not exist")}
}
logrus.Errorf("get maven setting config list failure %s", err.Error())
return &util.APIHandleError{Code: 400, Err: fmt.Errorf("get setting failure")}
}
if sm.Data == nil {
sm.Data = make(map[string]string)
}
if sm.Annotations == nil {
sm.Annotations = make(map[string]string)
}
sm.Data["mavensetting"] = ms.Content
sm.Annotations["updateTime"] = time.Now().Format(time.RFC3339)
if _, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Update(sm); err != nil {
logrus.Errorf("update maven setting configmap failure %s", err.Error())
return &util.APIHandleError{Code: 500, Err: fmt.Errorf("update setting config failure")}
}
ms.UpdateTime = sm.Annotations["updateTime"]
ms.CreateTime = sm.CreationTimestamp.Format(time.RFC3339)
return nil
}
// MavenSettingDelete deletes the named maven setting.
func (c *clusterAction) MavenSettingDelete(name string) *util.APIHandleError {
err := c.clientset.CoreV1().ConfigMaps(c.namespace).Delete(name, &metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return &util.APIHandleError{Code: 404, Err: fmt.Errorf("setting not found")}
}
logrus.Errorf("delete maven setting config list failure %s", err.Error())
return &util.APIHandleError{Code: 500, Err: fmt.Errorf("setting delete failure")}
}
return nil
}
// MavenSettingDetail returns the detail of the named maven setting.
func (c *clusterAction) MavenSettingDetail(name string) (*MavenSetting, *util.APIHandleError) {
sm, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Get(name, metav1.GetOptions{})
if err != nil {
logrus.Errorf("get maven setting config failure %s", err.Error())
return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("setting not found")}
}
return &MavenSetting{
Name: sm.Name,
CreateTime: sm.CreationTimestamp.Format(time.RFC3339),
UpdateTime: sm.Annotations["updateTime"],
Content: sm.Data["mavensetting"],
}, nil
}