[ADD] add display node resources in grctl
This commit is contained in:
parent acfb55ecc3
commit bff2e520e7
@@ -64,6 +64,21 @@ func (n *node) Get(node string) (*client.HostNode, *util.APIHandleError) {
 	}
 	return &gc, nil
 }
 
+func (n *node) GetNodeResource(node string) (*client.NodeResource, *util.APIHandleError) {
+	var res utilhttp.ResponseBody
+	var gc client.NodeResource
+	res.Bean = &gc
+	code, err := n.DoRequest(n.prefix+"/"+node+"/resource", "GET", nil, &res)
+	if err != nil {
+		return nil, util.CreateAPIHandleError(code, err)
+	}
+	if code != 200 {
+		return nil, util.CreateAPIHandleError(code, fmt.Errorf("Get database center configs code %d", code))
+	}
+	return &gc, nil
+}
+
 func (n *node) GetNodeByRule(rule string) ([]*client.HostNode, *util.APIHandleError) {
 	var res utilhttp.ResponseBody
 	var gc []*client.HostNode
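
For context, a minimal sketch of how grctl code might call the new client method. printNodeResource is an illustrative name only; clients.RegionClient, handleErr, and the NodeResource fields do appear in this commit:

	// Illustrative helper (not part of this commit): fetch and print the
	// per-node resource summary exposed by the new endpoint.
	func printNodeResource(nodeID string) {
		res, err := clients.RegionClient.Nodes().GetNodeResource(nodeID)
		handleErr(err)
		fmt.Printf("CPU %.2f/%d cores, memory %d/%d MB\n",
			res.ReqCPU, res.CapCPU, res.ReqMem, res.CapMem)
	}
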
@@ -204,6 +219,7 @@ type TaskInterface interface {
 type NodeInterface interface {
 	GetNodeByRule(rule string) ([]*client.HostNode, *util.APIHandleError)
 	Get(node string) (*client.HostNode, *util.APIHandleError)
+	GetNodeResource(node string) (*client.NodeResource, *util.APIHandleError)
 	List() ([]*client.HostNode, *util.APIHandleError)
 	Add(node *client.APIHostNode) *util.APIHandleError
 	Up(nid string) *util.APIHandleError
@@ -62,10 +62,10 @@ func getClusterInfo(c *cli.Context) error {
 	serviceTable2 := termtables.CreateTable()
 	serviceTable2.AddHeaders("Service", "HealthyQuantity/Total", "Message")
 	serviceStatusInfo := getServicesHealthy(list)
-	status, message := clusterStatus(serviceStatusInfo["Role"],serviceStatusInfo["Ready"])
+	status, message := clusterStatus(serviceStatusInfo["Role"], serviceStatusInfo["Ready"])
 	serviceTable2.AddRow("\033[0;33;33mClusterStatus\033[0m", status, message)
 	for name, v := range serviceStatusInfo {
-		if name == "Role"{
+		if name == "Role" {
 			continue
 		}
 		status, message := summaryResult(v)
@@ -74,11 +74,11 @@ func getClusterInfo(c *cli.Context) error {
 	fmt.Println(serviceTable2.Render())
 	//show node detail
 	serviceTable := termtables.CreateTable()
-	serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status")
+	serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status", "UsedCPU", "UseMemory")
 	var rest []*client.HostNode
 	for _, v := range list {
 		if v.Role.HasRule("manage") {
-			handleStatus(serviceTable, isNodeReady(v), v)
+			handleStatus(serviceTable, isNodeReady(v), v, 0, 0)
 		} else {
 			rest = append(rest, v)
 		}
@@ -87,7 +87,11 @@ func getClusterInfo(c *cli.Context) error {
 		serviceTable.AddSeparator()
 	}
 	for _, v := range rest {
-		handleStatus(serviceTable, isNodeReady(v), v)
+		nodeResource, err := clients.RegionClient.Nodes().GetNodeResource(v.ID)
+		handleErr(err)
+		usedCpu := nodeResource.ReqCPU / float32(nodeResource.CapCPU) * 100
+		useMemory := nodeResource.ReqMem / nodeResource.CapMem * 100
+		handleStatus(serviceTable, isNodeReady(v), v, usedCpu, useMemory)
 	}
 	fmt.Println(serviceTable.Render())
 	return nil
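
A caveat in this loop: nodeResource.ReqMem and nodeResource.CapMem are both int, so ReqMem / CapMem * 100 is evaluated as integer division and truncates to 0 whenever requested memory is below capacity. A sketch of a version that keeps the fraction:

	// Sketch: divide as float64 first, then truncate to the int that
	// the handleStatus signature expects.
	useMemory := int(float64(nodeResource.ReqMem) / float64(nodeResource.CapMem) * 100)
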
@@ -31,6 +31,7 @@ import (
 	"strings"
 	"os/exec"
 	"github.com/gosuri/uitable"
+	"strconv"
 )
 
 func handleErr(err *util.APIHandleError) {
@@ -98,7 +99,14 @@ func fileExist(path string) bool {
 	}
 	return false
 }
-func handleStatus(serviceTable *termtables.Table, ready bool, v *client.HostNode) {
+
+func handleStatus(serviceTable *termtables.Table, ready bool, v *client.HostNode, usedCpu float32, usedMemory int) {
+	cpu := "N/A"
+	memory := "N/A"
+	if usedCpu != 0 && usedMemory != 0 {
+		cpu = fmt.Sprintf("%.2f", usedCpu) + "%"
+		memory = strconv.Itoa(usedMemory) + "%"
+	}
 	var status string
 	if ready == true {
 		status = "\033[0;32;32m running(healthy) \033[0m"
@@ -119,12 +127,12 @@ func handleStatus(serviceTable *termtables.Table, ready bool, v *client.HostNode
 		status = "\033[0;31;31m offline \033[0m"
 	}
 	if v.Role.HasRule("compute") && !v.Role.HasRule("manage") {
-		serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status)
+		serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status, cpu, memory)
 	} else if v.Role.HasRule("manage") && !v.Role.HasRule("compute") {
 		//scheduable="n/a"
-		serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status)
+		serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status, cpu, memory)
 	} else if v.Role.HasRule("compute") && v.Role.HasRule("manage") {
-		serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status)
+		serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status, cpu, memory)
 	}
 }
 
@@ -234,11 +242,11 @@ func NewCmdNode() cli.Command {
 	list, err := clients.RegionClient.Nodes().List()
 	handleErr(err)
 	serviceTable := termtables.CreateTable()
-	serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status")
+	serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status", "UsedCPU", "UseMemory")
 	var rest []*client.HostNode
 	for _, v := range list {
 		if v.Role.HasRule("manage") {
-			handleStatus(serviceTable, isNodeReady(v), v)
+			handleStatus(serviceTable, isNodeReady(v), v, 0, 0)
 		} else {
 			rest = append(rest, v)
 		}
@@ -247,7 +255,11 @@ func NewCmdNode() cli.Command {
 		serviceTable.AddSeparator()
 	}
 	for _, v := range rest {
-		handleStatus(serviceTable, isNodeReady(v), v)
+		nodeResource, err := clients.RegionClient.Nodes().GetNodeResource(v.ID)
+		handleErr(err)
+		usedCpu := nodeResource.ReqCPU / float32(nodeResource.CapCPU) * 100
+		useMemory := nodeResource.ReqMem / nodeResource.CapMem * 100
+		handleStatus(serviceTable, isNodeReady(v), v, usedCpu, useMemory)
 	}
 	fmt.Println(serviceTable.Render())
 	return nil
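
The integer-division caveat noted above for getClusterInfo applies equally to this copy of the loop; the float64-based sketch would work here unchanged.
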
@@ -37,7 +37,6 @@ import (
 
 	"github.com/goodrain/rainbond/node/api/model"
 	httputil "github.com/goodrain/rainbond/util/http"
-
 )
 
 //GetNodeDetails GetNodeDetails
@@ -399,4 +398,47 @@ func GetServicesHealthy(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 		httputil.ReturnSuccess(r, w, healthMap)
 	}
 }
+
+//GetNodeResource
+func GetNodeResource(w http.ResponseWriter, r *http.Request) {
+	nodeUID := strings.TrimSpace(chi.URLParam(r, "node_id"))
+	hostNode, apierr := nodeService.GetNode(nodeUID)
+	if apierr != nil {
+		api.ReturnError(r, w, 500, apierr.Error())
+		return
+	}
+	node, err := kubecli.GetNodeByName(hostNode.ID)
+	if err != nil {
+		api.ReturnError(r, w, 500, err.Error())
+		return
+	}
+	var capCPU int64
+	var capMem int64
+
+	capCPU = node.Status.Capacity.Cpu().Value()
+	capMem = node.Status.Capacity.Memory().Value()
+
+	var cpuR int64
+	var memR int64
+
+	pods, _ := kubecli.GetPodsByNodes(hostNode.ID)
+	for _, pod := range pods {
+		for _, c := range pod.Spec.Containers {
+			rc := c.Resources.Requests.Cpu().MilliValue()
+			rm := c.Resources.Requests.Memory().Value()
+			cpuR += rc
+			memR += rm
+		}
+	}
+
+	podMemRequestMB := memR / 1024 / 1024
+	result := &model.NodeResource{
+		CapCPU: int(capCPU),
+		CapMem: int(capMem) / 1024 / 1024,
+		ReqCPU: float32(cpuR) / 1000,
+		ReqMem: int(podMemRequestMB),
+	}
+
+	api.ReturnSuccess(r, w, result)
+}
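
Units implied by the handler above: CapCPU is whole cores, CapMem and ReqMem are MiB (bytes divided by 1024 twice), and ReqCPU is cores as a float32 (summed container millicore requests divided by 1000). The percentages grctl prints are computed from these values on the client side.
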
@@ -194,6 +194,14 @@ type ClusterResource struct {
 	ReqDisk uint64 `json:"req_disk"`
 }
 
+//NodeResource node resource
+type NodeResource struct {
+	CapCPU int     `json:"cap_cpu"`
+	CapMem int     `json:"cap_mem"`
+	ReqCPU float32 `json:"req_cpu"`
+	ReqMem int     `json:"req_mem"`
+}
+
 type FirstConfig struct {
 	StorageMode string `json:"storage_mode"`
 	StorageHost string `json:"storage_host,omitempty"`
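
A quick sketch of the wire format implied by these json tags, assuming encoding/json is imported; the field values are made up:

	// Sketch: marshaling a NodeResource shows the JSON body the new
	// endpoint returns (values illustrative).
	b, _ := json.Marshal(NodeResource{CapCPU: 4, CapMem: 7982, ReqCPU: 1.75, ReqMem: 2048})
	fmt.Println(string(b))
	// {"cap_cpu":4,"cap_mem":7982,"req_cpu":1.75,"req_mem":2048}
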
@@ -450,9 +458,9 @@ type AlertingNameConfig struct {
 }
 
 type RulesConfig struct {
-	Alert  string            `yaml:"alert" json:"alert"`
-	Expr   string            `yaml:"expr" json:"expr"`
-	For    string            `yaml:"for" json:"for"`
-	Labels map[string]string `yaml:"labels" json:"labels"`
+	Alert       string            `yaml:"alert" json:"alert"`
+	Expr        string            `yaml:"expr" json:"expr"`
+	For         string            `yaml:"for" json:"for"`
+	Labels      map[string]string `yaml:"labels" json:"labels"`
 	Annotations map[string]string `yaml:"annotations" json:"annotations"`
 }
@@ -59,6 +59,7 @@ func Routers(mode string) *chi.Mux {
 		})
 		r.Route("/nodes", func(r chi.Router) {
 			r.Get("/fullres", controller.ClusterInfo)
+			r.Get("/{node_id}/resource", controller.GetNodeResource)
 			r.Get("/resources", controller.Resources)
 			r.Get("/capres", controller.CapRes)
 			r.Get("/", controller.GetNodes)
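
With the route registered, the summary is reachable with a plain GET. Only the relative path under the /nodes group is established by this diff; the base URL in the sketch below is an assumption:

	// Sketch: apiBase (the node API address and any mount prefix) is an
	// assumption; the /{node_id}/resource route is from this commit.
	resp, err := http.Get(apiBase + "/nodes/" + nodeID + "/resource")
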
@@ -140,10 +140,10 @@ func (n *NodeService) GetServicesHealthy() (map[string][]map[string]string, *uti
 	for _, v := range n.NodeStatus.Conditions {
 		status, ok := StatusMap[string(v.Type)]
 		if !ok {
-			StatusMap[string(v.Type)] = []map[string]string{map[string]string{"type": string(v.Type), "status": string(v.Status), "message":string(v.Message), "hostname": n.HostName}}
+			StatusMap[string(v.Type)] = []map[string]string{map[string]string{"type": string(v.Type), "status": string(v.Status), "message": string(v.Message), "hostname": n.HostName}}
 		} else {
 			list := status
-			list = append(list, map[string]string{"type": string(v.Type), "status": string(v.Status), "message":string(v.Message), "hostname": n.HostName})
+			list = append(list, map[string]string{"type": string(v.Type), "status": string(v.Status), "message": string(v.Message), "hostname": n.HostName})
 			StatusMap[string(v.Type)] = list
 		}
@@ -170,9 +170,13 @@ func (n *NodeService) CordonNode(nodeID string, unschedulable bool) *utils.APIHa
 	hostNode.Unschedulable = unschedulable
 	//update node status
 	if unschedulable {
-		hostNode.Status = "unschedulable"
+		hostNode.Status = Running
+		hostNode.NodeStatus.Status = Running
+		hostNode.Unschedulable = true
+	} else {
+		hostNode.Status = Running
+		hostNode.NodeStatus.Status = Running
+		hostNode.Unschedulable = false
 	}
 	if k8snode != nil {
 		node, err := n.kubecli.CordonOrUnCordon(hostNode.ID, unschedulable)
@@ -28,7 +28,7 @@ import (
 	client "github.com/coreos/etcd/clientv3"
 	"github.com/coreos/etcd/mvcc/mvccpb"
 	conf "github.com/goodrain/rainbond/cmd/node/option"
-	store "github.com/goodrain/rainbond/node/core/store"
+	"github.com/goodrain/rainbond/node/core/store"
 	"github.com/pquerna/ffjson/ffjson"
 )
@@ -78,6 +78,14 @@ type HostNode struct {
 	ClusterNode
 }
 
+//NodeResource node resource
+type NodeResource struct {
+	CapCPU int     `json:"cap_cpu"`
+	CapMem int     `json:"cap_mem"`
+	ReqCPU float32 `json:"req_cpu"`
+	ReqMem int     `json:"req_mem"`
+}
+
 //NodeStatus node status
 type NodeStatus struct {
 	Status string `json:"status"` //installed running offline unknown