Merge branch 'V3.7' of github.com:goodrain/rainbond into V3.7

This commit is contained in:
ysicing 2018-08-06 11:17:35 +08:00
commit 846bae2508
23 changed files with 398 additions and 143 deletions

View File

@ -72,11 +72,11 @@ doc:
@cd cmd/api && swagger generate spec -o ../../hack/contrib/docker/api/html/swagger.json
cert-ca:
@_output/3.7/rainbond-certutil create --is-ca --ca-name=./test/ssl/ca.pem --ca-key-name=./test/ssl/ca.key.pem
@_output/3.7/rainbond-certutil create --is-ca --ca-name=./test/ssl/ca.pem --ca-key-name=./test/ssl/ca.key.pem --domains region.goodrain.me
cert-server:
@_output/3.7/rainbond-certutil create --ca-name=./test/ssl/ca.pem --ca-key-name=./test/ssl/ca.key.pem --crt-name=./test/ssl/server.pem --crt-key-name=./test/ssl/server.key.pem
@_output/3.7/rainbond-certutil create --ca-name=./test/ssl/ca.pem --ca-key-name=./test/ssl/ca.key.pem --crt-name=./test/ssl/server.pem --crt-key-name=./test/ssl/server.key.pem --domains region.goodrain.me
cert-client:
@_output/3.7/rainbond-certutil create --ca-name=./test/ssl/ca.pem --ca-key-name=./test/ssl/ca.key.pem --crt-name=./test/ssl/client.pem --crt-key-name=./test/ssl/client.key.pem
@_output/3.7/rainbond-certutil create --ca-name=./test/ssl/ca.pem --ca-key-name=./test/ssl/ca.key.pem --crt-name=./test/ssl/client.pem --crt-key-name=./test/ssl/client.key.pem --domains region.goodrain.me
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@echo "\033[36m 🤔 single plugin,how to work? \033[0m"

View File

@ -38,6 +38,7 @@ type TenantInterface interface {
GetSupportProtocols(w http.ResponseWriter, r *http.Request)
TransPlugins(w http.ResponseWriter, r *http.Request)
ServicesCount(w http.ResponseWriter, r *http.Request)
GetManyDeployVersion(w http.ResponseWriter, r *http.Request)
}
//ServiceInterface ServiceInterface
@ -68,6 +69,7 @@ type ServiceInterface interface {
Share(w http.ResponseWriter, r *http.Request)
ShareResult(w http.ResponseWriter, r *http.Request)
BuildVersionInfo(w http.ResponseWriter, r *http.Request)
GetDeployVersion(w http.ResponseWriter, r *http.Request)
}
//EntranceInterface EntranceInterface

View File

@ -99,7 +99,7 @@ func (v2 *V2) tenantNameRouter() chi.Router {
r.Delete("/groupapp/backups/{backup_id}", controller.DeleteBackup)
r.Post("/groupapp/backups/{backup_id}/restore", controller.Restore)
r.Get("/groupapp/backups/{backup_id}/restore/{restore_id}", controller.RestoreResult)
r.Post("/deployversions", controller.GetManager().GetManyDeployVersion)
return r
}
@ -131,8 +131,9 @@ func (v2 *V2) serviceRouter() chi.Router {
//构建版本列表
r.Get("/build-list", controller.GetManager().BuildList)
//构建版本操作
r.Get("/build-version/{build_version}",controller.GetManager().BuildVersionInfo)
r.Delete("/build-version/{build_version}",controller.GetManager().BuildVersionInfo)
r.Get("/build-version/{build_version}", controller.GetManager().BuildVersionInfo)
r.Get("/deployversion", controller.GetManager().GetDeployVersion)
r.Delete("/build-version/{build_version}", controller.GetManager().BuildVersionInfo)
//应用分享
r.Post("/share", controller.GetManager().Share)
r.Get("/share/{share_id}", controller.GetManager().ShareResult)

View File

@ -29,16 +29,17 @@ import (
"time"
"os"
"github.com/Sirupsen/logrus"
"github.com/go-chi/chi"
"github.com/goodrain/rainbond/api/handler"
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
tutil "github.com/goodrain/rainbond/util"
httputil "github.com/goodrain/rainbond/util/http"
"github.com/thedevsaddam/govalidator"
"github.com/go-chi/chi"
"github.com/jinzhu/gorm"
"os"
"github.com/thedevsaddam/govalidator"
)
//TIMELAYOUT timelayout
@ -546,13 +547,13 @@ func (t *TenantStruct) BuildVersionIsExist(w http.ResponseWriter, r *http.Reques
serviceID := r.Context().Value(middleware.ContextKey("service_id")).(string)
buildVersion := chi.URLParam(r, "build_version")
_, err := db.GetManager().VersionInfoDao().GetVersionByDeployVersion(buildVersion, serviceID)
if err != nil && err!=gorm.ErrRecordNotFound {
if err != nil && err != gorm.ErrRecordNotFound {
httputil.ReturnError(r, w, 500, fmt.Sprintf("get build version status erro, %v", err))
return
}
if err == gorm.ErrRecordNotFound{
if err == gorm.ErrRecordNotFound {
statusMap["status"] = false
}else {
} else {
statusMap["status"] = true
}
httputil.ReturnSuccess(r, w, statusMap)
@ -609,6 +610,56 @@ func (t *TenantStruct) BuildVersionInfo(w http.ResponseWriter, r *http.Request)
}
//GetDeployVersion returns the version info record that matches the service's
//current deploy version. It responds 404 when no such build version exists
//and 500 on any other database error.
func (t *TenantStruct) GetDeployVersion(w http.ResponseWriter, r *http.Request) {
	// The middleware has already resolved and stored the service on the context.
	service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
	version, err := db.GetManager().VersionInfoDao().GetVersionByDeployVersion(service.DeployVersion, service.ServiceID)
	if err != nil && err != gorm.ErrRecordNotFound {
		httputil.ReturnError(r, w, 500, fmt.Sprintf("get build version status error, %v", err))
		return
	}
	if err == gorm.ErrRecordNotFound {
		// Constant message: no need for Sprintf here.
		httputil.ReturnError(r, w, 404, "build version does not exist")
		return
	}
	httputil.ReturnSuccess(r, w, version)
}
//GetManyDeployVersion returns the currently deployed version info for each
//requested service. The request body must carry "service_ids", an array of
//service id strings. Responds 400 on a malformed payload and 500 on database
//errors.
func (t *TenantStruct) GetManyDeployVersion(w http.ResponseWriter, r *http.Request) {
	rules := validator.MapData{
		"service_ids": []string{"required"},
	}
	data, ok := httputil.ValidatorRequestMapAndErrorResponse(r, w, rules, nil)
	if !ok {
		return
	}
	serviceIDs, ok := data["service_ids"].([]interface{})
	if !ok {
		httputil.ReturnError(r, w, 400, "service ids must be an array")
		return
	}
	list := make([]string, 0, len(serviceIDs))
	for _, s := range serviceIDs {
		// Guard the assertion: a mixed-type payload must produce a 400,
		// not a panic from an unchecked type assertion.
		id, ok := s.(string)
		if !ok {
			httputil.ReturnError(r, w, 400, "service ids must be an array of strings")
			return
		}
		list = append(list, id)
	}
	services, err := db.GetManager().TenantServiceDao().GetServiceByIDs(list)
	if err != nil {
		// err.Error() is not a format string; pass it through directly.
		httputil.ReturnError(r, w, 500, err.Error())
		return
	}
	versionList := make([]*dbmodel.VersionInfo, 0, len(services))
	for _, service := range services {
		version, err := db.GetManager().VersionInfoDao().GetVersionByDeployVersion(service.DeployVersion, service.ServiceID)
		if err != nil && err != gorm.ErrRecordNotFound {
			httputil.ReturnError(r, w, 500, fmt.Sprintf("get build version status error, %v", err))
			return
		}
		// Record-not-found still appends (possibly empty) version info,
		// keeping the response aligned one-to-one with the services found.
		versionList = append(versionList, version)
	}
	httputil.ReturnSuccess(r, w, versionList)
}
//DeployService DeployService
func (t *TenantStruct) DeployService(w http.ResponseWriter, r *http.Request) {
logrus.Debugf("trans deploy service")

View File

@ -76,10 +76,9 @@ func (s *ServiceAction) SetTenantServicePluginRelation(tenantID, serviceID strin
if err != nil {
return nil, util.CreateAPIHandleErrorFromDBError("get plugin by plugin id", err)
}
catePlugin := strings.Split(plugin.PluginModel, ":")[0]
crt, err := db.GetManager().TenantServicePluginRelationDao().CheckSomeModelLikePluginByServiceID(
serviceID,
catePlugin,
plugin.PluginModel,
)
if err != nil {
return nil, util.CreateAPIHandleErrorFromDBError("check plugin model", err)

View File

@ -54,7 +54,6 @@ type APIResult struct {
//Share 分享应用
func (s *ServiceShareHandle) Share(serviceID string, ss api_model.ServiceShare) (*APIResult, *util.APIHandleError) {
service, err := db.GetManager().TenantServiceDao().GetServiceByID(serviceID)
if err != nil {
return nil, util.CreateAPIHandleErrorFromDBError("查询应用出错", err)
@ -67,7 +66,7 @@ func (s *ServiceShareHandle) Share(serviceID string, ss api_model.ServiceShare)
shareID := uuid.NewV4().String()
var slugPath, shareImageName string
var bs api_db.BuildTaskStruct
if service.IsSlug() {
if version.DeliveredType == "slug" {
shareSlugInfo := ss.Body.SlugInfo
slugPath = service.CreateShareSlug(ss.Body.ServiceKey, shareSlugInfo.Namespace, ss.Body.AppVersion)
if ss.Body.SlugInfo.FTPHost == "" {

View File

@ -96,7 +96,6 @@ func (m *monitor) DelRule(name string) (*utilhttp.ResponseBody, *util.APIHandleE
func (m *monitor) AddRule(path string) (*utilhttp.ResponseBody, *util.APIHandleError) {
_, err := os.Stat(path)
if err != nil {
logrus.Info(err)
if !os.IsExist(err) {
return nil, util.CreateAPIHandleError(400, errors.New("file does not exist"))
}
@ -132,7 +131,6 @@ func (m *monitor) AddRule(path string) (*utilhttp.ResponseBody, *util.APIHandleE
func (m *monitor) RegRule(ruleName string, path string) (*utilhttp.ResponseBody, *util.APIHandleError) {
_, err := os.Stat(path)
if err != nil {
logrus.Info(err)
if !os.IsExist(err) {
return nil, util.CreateAPIHandleError(400, errors.New("file does not exist"))
}

View File

@ -48,6 +48,7 @@ type CertInformation struct {
IsCA bool
Names []pkix.AttributeTypeAndValue
IPAddresses []net.IP
Domains []string
}
//CreateCRT create crt
@ -60,10 +61,10 @@ func CreateCRT(RootCa *x509.Certificate, RootKey *rsa.PrivateKey, info CertInfor
var buf []byte
if RootCa == nil || RootKey == nil {
//创建自签名证书
//create ca cert
buf, err = x509.CreateCertificate(rand.Reader, Crt, Crt, &Key.PublicKey, Key)
} else {
//使用根证书签名
//create cert by ca
buf, err = x509.CreateCertificate(rand.Reader, Crt, RootCa, &Key.PublicKey, RootKey)
}
if err != nil {
@ -86,10 +87,11 @@ func write(filename, Type string, p []byte) error {
if err != nil {
return err
}
var b *pem.Block = &pem.Block{Bytes: p, Type: Type}
var b = &pem.Block{Bytes: p, Type: Type}
return pem.Encode(File, b)
}
//Parse Parse
func Parse(crtPath, keyPath string) (rootcertificate *x509.Certificate, rootPrivateKey *rsa.PrivateKey, err error) {
rootcertificate, err = ParseCrt(crtPath)
if err != nil {
@ -99,6 +101,7 @@ func Parse(crtPath, keyPath string) (rootcertificate *x509.Certificate, rootPriv
return
}
//ParseCrt ParseCrt
func ParseCrt(path string) (*x509.Certificate, error) {
buf, err := ioutil.ReadFile(path)
if err != nil {
@ -109,6 +112,7 @@ func ParseCrt(path string) (*x509.Certificate, error) {
return x509.ParseCertificate(p.Bytes)
}
//ParseKey ParseKey
func ParseKey(path string) (*rsa.PrivateKey, error) {
buf, err := ioutil.ReadFile(path)
if err != nil {
@ -130,13 +134,14 @@ func newCertificate(info CertInformation) *x509.Certificate {
Locality: info.Locality,
ExtraNames: info.Names,
},
NotBefore: time.Now(), //证书的开始时间
NotAfter: time.Now().AddDate(20, 0, 0), //证书的结束时间
BasicConstraintsValid: true, //基本的有效性约束
NotBefore: time.Now(), //start time
NotAfter: time.Now().AddDate(20, 0, 0), //end time
BasicConstraintsValid: true, //basic
IsCA: info.IsCA, //是否是根证书
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, //证书用途
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
EmailAddresses: info.EmailAddress,
IPAddresses: info.IPAddresses,
DNSNames: info.Domains,
}
}

View File

@ -37,6 +37,7 @@ type Config struct {
Address []string
IsCa bool
CAName, CAKeyName string
Domains []string
}
func main() {
@ -61,6 +62,11 @@ func main() {
Value: &cli.StringSlice{"127.0.0.1"},
Usage: "address list",
},
cli.StringSliceFlag{
Name: "domains",
Value: &cli.StringSlice{""},
Usage: "domain list",
},
cli.StringFlag{
Name: "ca-name",
Value: "./ca.pem",
@ -91,6 +97,7 @@ func parseConfig(ctx *cli.Context) Config {
c.CAName = ctx.String("ca-name")
c.CrtName = ctx.String("crt-name")
c.KeyName = ctx.String("crt-key-name")
c.Domains = ctx.StringSlice("domains")
c.IsCa = ctx.Bool("is-ca")
return c
}
@ -130,6 +137,7 @@ func (c *Config) CreateCertInformation() CertInformation {
CommonName: "rainbond",
CrtName: c.CrtName,
KeyName: c.KeyName,
Domains: c.Domains,
}
if c.IsCa {
baseinfo.CrtName = c.CAName

View File

@ -39,7 +39,7 @@ type Config struct {
StartArgs []string
ConfigFile string
AlertingRulesFile string
AlertManagerUrl string
AlertManagerUrl []string
LocalStoragePath string
Web Web
Tsdb Tsdb
@ -99,7 +99,7 @@ func NewConfig() *Config {
ConfigFile: "/etc/prometheus/prometheus.yml",
AlertingRulesFile: "/etc/prometheus/rules.yml",
AlertManagerUrl: "",
AlertManagerUrl: []string{},
LocalStoragePath: "/prometheusdata",
WebTimeout: "5m",
RemoteFlushDeadline: "1m",
@ -127,13 +127,12 @@ func NewConfig() *Config {
func (c *Config) AddFlag(cmd *pflag.FlagSet) {
cmd.StringVar(&c.EtcdEndpointsLine, "etcd-endpoints", c.EtcdEndpointsLine, "etcd endpoints list.")
cmd.StringVar(&c.AdvertiseAddr, "advertise-addr", c.AdvertiseAddr, "advertise address, and registry into etcd.")
cmd.StringSliceVar(&c.AlertManagerUrl, "alertmanager-address", c.AlertManagerUrl, "AlertManager url.")
}
func (c *Config) AddPrometheusFlag(cmd *pflag.FlagSet) {
cmd.StringVar(&c.ConfigFile, "config.file", c.ConfigFile, "Prometheus configuration file path.")
cmd.StringVar(&c.AlertManagerUrl, "alertmanager.url", c.AlertManagerUrl, "AlertManager url.")
cmd.StringVar(&c.AlertingRulesFile, "rules-config.file", c.AlertingRulesFile, "Prometheus alerting rules config file path.")
cmd.StringVar(&c.Web.ListenAddress, "web.listen-address", c.Web.ListenAddress, "Address to listen on for UI, API, and telemetry.")
@ -213,9 +212,6 @@ func (c *Config) CompleteConfig() {
if c.Web.EnableLifecycle {
defaultOptions += " --web.enable-lifecycle"
}
if c.AlertManagerUrl != "" {
defaultOptions += " --alertmanager.url="+c.AlertManagerUrl
}
args := strings.Split(defaultOptions, " ")
c.StartArgs = append(c.StartArgs, os.Args[0])

View File

@ -44,7 +44,7 @@ type TenantDao interface {
GetTenantByEid(eid string) ([]*model.Tenants, error)
GetPagedTenants(offset, len int) ([]*model.Tenants, error)
GetTenantIDsByNames(names []string) ([]string, error)
GetTenantByUUIDIsExist(uuid string) (bool)
GetTenantByUUIDIsExist(uuid string) bool
}
//TenantDao tenant dao
@ -69,13 +69,13 @@ type EventLogDao interface {
DeleteServiceEventLog(obj *model.EventLogMessage) error
GetAllServiceEventLog() ([]*model.EventLogMessage, error)
DeleteServiceEventLogByEventId(eventId string) error
}
//TenantServiceDao TenantServiceDao
type TenantServiceDao interface {
Dao
GetServiceByID(serviceID string) (*model.TenantServices, error)
GetServiceByIDs(serviceIDs []string) ([]*model.TenantServices, error)
GetServiceAliasByIDs(uids []string) ([]*model.TenantServices, error)
GetServiceByTenantIDAndServiceAlias(tenantID, serviceName string) (*model.TenantServices, error)
SetTenantServiceStatus(serviceID, status string) error
@ -338,7 +338,7 @@ type EventDao interface {
GetEventByEventID(eventID string) (*model.ServiceEvent, error)
GetEventByEventIDs(eventIDs []string) ([]*model.ServiceEvent, error)
GetEventByServiceID(serviceID string) ([]*model.ServiceEvent, error)
DelEventByServiceID(serviceID string) (error)
DelEventByServiceID(serviceID string) error
}
//VersionInfoDao VersionInfoDao

View File

@ -69,7 +69,7 @@ func (t *TenantDaoImpl) GetTenantByUUID(uuid string) (*model.Tenants, error) {
}
//GetTenantByUUIDIsExist 获取租户
func (t *TenantDaoImpl) GetTenantByUUIDIsExist(uuid string) (bool) {
func (t *TenantDaoImpl) GetTenantByUUIDIsExist(uuid string) bool {
var tenant model.Tenants
isExist := t.DB.Where("uuid = ?", uuid).First(&tenant).RecordNotFound()
return isExist
@ -156,7 +156,6 @@ func (t *TenantServicesDaoImpl) GetAllServicesID() ([]*model.TenantServices, err
return services, nil
}
//AddModel 添加租户应用
func (t *TenantServicesDaoImpl) AddModel(mo model.Interface) error {
service := mo.(*model.TenantServices)
@ -322,6 +321,18 @@ func (t *TenantServicesDaoImpl) GetServiceAliasByIDs(uids []string) ([]*model.Te
return services, nil
}
//GetServiceByIDs get some service by service ids
func (t *TenantServicesDaoImpl) GetServiceByIDs(uids []string) ([]*model.TenantServices, error) {
	var result []*model.TenantServices
	err := t.DB.Where("service_id in (?)", uids).Find(&result).Error
	// A not-found result is not an error for a bulk lookup: return the
	// (possibly empty) slice instead of surfacing gorm.ErrRecordNotFound.
	if err == nil || err == gorm.ErrRecordNotFound {
		return result, nil
	}
	return nil, err
}
//GetServiceByTenantIDAndServiceAlias 根据租户名和服务名
func (t *TenantServicesDaoImpl) GetServiceByTenantIDAndServiceAlias(tenantID, serviceName string) (*model.TenantServices, error) {
var service model.TenantServices
@ -427,7 +438,7 @@ func (t *TenantServicesDeleteImpl) GetTenantServicesDeleteByCreateTime(createTim
if err == gorm.ErrRecordNotFound {
return ServiceDel, nil
}
return nil,err
return nil, err
}
return ServiceDel, nil
}
@ -439,7 +450,6 @@ func (t *TenantServicesDeleteImpl) DeleteTenantServicesDelete(record *model.Tena
return nil
}
//TenantServicesPortDaoImpl 租户应用端口操作
type TenantServicesPortDaoImpl struct {
DB *gorm.DB

View File

@ -19,7 +19,6 @@
package clients
import (
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/api/region"
)
@ -28,7 +27,6 @@ var RegionClient region.Region
//InitRegionClient init region api client
func InitRegionClient(reg region.APIConf) (err error) {
logrus.Info(reg)
RegionClient, err = region.NewRegion(reg)
return
}

View File

@ -62,8 +62,10 @@ func getClusterInfo(c *cli.Context) error {
serviceTable2 := termtables.CreateTable()
serviceTable2.AddHeaders("Service", "HealthyQuantity/Total", "Message")
serviceStatusInfo := getServicesHealthy(list)
status, message := clusterStatus(serviceStatusInfo["Role"],serviceStatusInfo["Ready"])
serviceTable2.AddRow("\033[0;33;33mClusterStatus\033[0m", status, message)
for name, v := range serviceStatusInfo {
if name == string(client.NodeReady){
if name == "Role"{
continue
}
status, message := summaryResult(v)
@ -72,7 +74,7 @@ func getClusterInfo(c *cli.Context) error {
fmt.Println(serviceTable2.Render())
//show node detail
serviceTable := termtables.CreateTable()
serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status", "Alived", "Schedulable", "Ready")
serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status")
var rest []*client.HostNode
for _, v := range list {
if v.Role.HasRule("manage") {
@ -94,6 +96,8 @@ func getClusterInfo(c *cli.Context) error {
func getServicesHealthy(nodes []*client.HostNode) (map[string][]map[string]string) {
StatusMap := make(map[string][]map[string]string, 30)
roleList := make([]map[string]string, 0, 10)
for _, n := range nodes {
for _, v := range n.NodeStatus.Conditions {
status, ok := StatusMap[string(v.Type)]
@ -106,7 +110,10 @@ func getServicesHealthy(nodes []*client.HostNode) (map[string][]map[string]strin
}
}
roleList = append(roleList, map[string]string{"role": n.Role.String(), "status": n.NodeStatus.Status})
}
StatusMap["Role"] = roleList
return StatusMap
}
@ -114,14 +121,14 @@ func summaryResult(list []map[string]string) (status string, errMessage string)
upNum := 0
err := ""
for _, v := range list {
if v["type"] == "OutOfDisk" ||v["type"] == "DiskPressure"||v["type"] == "MemoryPressure"||v["type"] == "InstallNotReady"{
if v["type"] == "OutOfDisk" || v["type"] == "DiskPressure" || v["type"] == "MemoryPressure" || v["type"] == "InstallNotReady" {
if v["status"] == "False" {
upNum += 1
} else {
err = ""
err = err + v["hostname"] + ":" + v["message"] + "/"
}
}else {
} else {
if v["status"] == "True" {
upNum += 1
} else {
@ -130,12 +137,69 @@ func summaryResult(list []map[string]string) (status string, errMessage string)
}
}
}
if upNum == len(list){
if upNum == len(list) {
status = "\033[0;32;32m" + strconv.Itoa(upNum) + "/" + strconv.Itoa(len(list)) + " \033[0m"
}else {
} else {
status = "\033[0;31;31m " + strconv.Itoa(upNum) + "/" + strconv.Itoa(len(list)) + " \033[0m"
}
errMessage = err
return
}
//handleRoleAndStatus reports whether the cluster has at least one running
//compute node and at least one running manage node. A dual-role node
//("compute,manage" or "manage,compute") that is running counts as both.
func handleRoleAndStatus(list []map[string]string) bool {
	var computeReady, manageReady bool
	for _, v := range list {
		if v["status"] != "running" {
			continue
		}
		switch v["role"] {
		case "compute":
			computeReady = true
		case "manage":
			manageReady = true
		case "compute,manage", "manage,compute":
			computeReady = true
			manageReady = true
		}
	}
	// Return the condition directly instead of if/else returning literals.
	return computeReady && manageReady
}
//handleNodeReady reports whether every entry in list has status "True".
//An empty list is vacuously ready, matching the original counting logic
//(0 == len(nil)).
func handleNodeReady(list []map[string]string) bool {
	for _, v := range list {
		if v["status"] != "True" {
			// One non-ready entry is enough to decide; no need to count.
			return false
		}
	}
	return true
}
//clusterStatus derives the overall cluster status string (ANSI-colored) and
//an accompanying error message from the node role list and readiness list.
//Role unavailability takes precedence over readiness problems.
func clusterStatus(roleList []map[string]string, ReadyList []map[string]string) (string, string) {
	status := "\033[0;32;32mhealthy\033[0m"
	message := ""
	if !handleNodeReady(ReadyList) {
		status = "\033[0;31;31munhealthy\033[0m"
		message = "There is a service exception in the cluster"
	}
	if !handleRoleAndStatus(roleList) {
		status = "\033[0;33;33munavailable\033[0m"
		message = "No compute nodes or management nodes are available in the cluster"
	}
	return status, message
}

View File

@ -31,7 +31,6 @@ import (
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/grctl/clients"
"net/http"
)
//NewCmdInit grctl init
@ -120,7 +119,7 @@ func NewCmdInstallStatus() cli.Command {
func initCluster(c *cli.Context) {
// check if the rainbond is already installed
fmt.Println("Checking install enviremant.")
_, err := os.Stat("/tmp/rainbond.success")
_, err := os.Stat("/opt/rainbond/rainbond.success")
if err == nil {
println("Rainbond is already installed, if you whant reinstall, then please delete the file: /tmp/rainbond.success")
return
@ -154,18 +153,7 @@ func initCluster(c *cli.Context) {
return
}
_, err = http.Get("http://127.0.0.1:7070")
if err != nil {
println("Install complete but WEB UI is can not access, please manual check node status by `grctl node list`")
return
}
ioutil.WriteFile("/tmp/rainbond.success", []byte(c.String("repo_ver")), 0644)
fmt.Println("Init manage node successful, next you can:")
fmt.Println(" access WEB UI: http://127.0.0.1:7070")
fmt.Println(" add compute node: grctl node add -h")
fmt.Println(" online compute node: grctl node up -h")
ioutil.WriteFile("/opt/rainbond/rainbond.success", []byte(c.String("repo_ver")), 0644)
return
}

View File

@ -19,8 +19,6 @@
package cmd
import (
"bytes"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/apcera/termtables"
@ -32,6 +30,7 @@ import (
"os"
"strings"
"os/exec"
"github.com/gosuri/uitable"
)
func handleErr(err *util.APIHandleError) {
@ -100,19 +99,29 @@ func fileExist(path string) bool {
return false
}
func handleStatus(serviceTable *termtables.Table, ready bool, v *client.HostNode) {
var formatReady string
var status string
if ready == true {
status = "\033[0;32;32m running(healthy) \033[0m"
}
if ready == false {
formatReady = "\033[0;31;31m false \033[0m"
} else {
formatReady = "\033[0;32;32m true \033[0m"
status = "\033[0;32;32m running(unhealthy) \033[0m"
}
if v.Unschedulable == true {
status = "\033[0;32;32m running(unschedulable) \033[0m"
}
if v.Status == "unknown" {
status = "\033[0;31;31m unknown \033[0m"
}
if v.Status == "offline" {
status = "\033[0;31;31m offline \033[0m"
}
if v.Role.HasRule("compute") && !v.Role.HasRule("manage") {
serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, v.Status, v.Alived, !v.Unschedulable, formatReady)
serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status)
} else if v.Role.HasRule("manage") && !v.Role.HasRule("compute") {
//scheduable="n/a"
serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, v.Status, v.Alived, "N/A", "N/A")
serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status)
} else if v.Role.HasRule("compute") && v.Role.HasRule("manage") {
serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, v.Status, v.Alived, !v.Unschedulable, formatReady)
serviceTable.AddRow(v.ID, v.InternalIP, v.HostName, v.Role.String(), v.Mode, status)
}
}
@ -184,13 +193,33 @@ func NewCmdNode() cli.Command {
v, err := clients.RegionClient.Nodes().Get(id)
handleErr(err)
nodeByte, _ := json.Marshal(v)
var out bytes.Buffer
error := json.Indent(&out, nodeByte, "", "\t")
if error != nil {
handleErr(util.CreateAPIHandleError(500, err))
}
fmt.Println(out.String())
table := uitable.New()
fmt.Printf("-------------------Node information-----------------------\n")
table.AddRow("status", v.NodeStatus.Status)
table.AddRow("unschedulable", v.Unschedulable)
table.AddRow("alived", v.Alived)
table.AddRow("uuid", v.ID)
table.AddRow("host_name", v.HostName)
table.AddRow("create_time", v.CreateTime)
table.AddRow("internal_ip", v.InternalIP)
table.AddRow("external_ip", v.ExternalIP)
table.AddRow("role", v.Role)
table.AddRow("mode", v.Mode)
table.AddRow("available_memory", v.AvailableMemory)
table.AddRow("available_cpu", v.AvailableCPU)
table.AddRow("pid", v.PID)
table.AddRow("version", v.Version)
table.AddRow("up", v.UpTime)
table.AddRow("down", v.DownTime)
table.AddRow("connected", v.Connected)
fmt.Println(table)
fmt.Printf("-------------------ervice health-----------------------\n")
serviceTable := termtables.CreateTable()
serviceTable.AddHeaders("Title", "Result", "Message")
extractReady(serviceTable, v, "Ready")
handleResult(serviceTable, v)
fmt.Println(serviceTable.Render())
return nil
},
},
@ -202,7 +231,7 @@ func NewCmdNode() cli.Command {
list, err := clients.RegionClient.Nodes().List()
handleErr(err)
serviceTable := termtables.CreateTable()
serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status", "Alived", "Schedulable", "Ready")
serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "NodeMode", "Status")
var rest []*client.HostNode
for _, v := range list {
if v.Role.HasRule("manage") {
@ -221,39 +250,6 @@ func NewCmdNode() cli.Command {
return nil
},
},
{
Name: "health",
Usage: "health hostID/internal ip",
Action: func(c *cli.Context) error {
Common(c)
id := c.Args().First()
if id == "" {
logrus.Errorf("need args")
return nil
}
nodes, err := clients.RegionClient.Nodes().List()
handleErr(err)
for _, v := range nodes {
if v.InternalIP == id {
id = v.ID
break
}
}
v, err := clients.RegionClient.Nodes().Get(id)
handleErr(err)
serviceTable := termtables.CreateTable()
serviceTable.AddHeaders("Title", "Result", "Message")
serviceTable.AddRow("Uid:", v.ID, "")
serviceTable.AddRow("IP:", v.InternalIP, "")
serviceTable.AddRow("HostName:", v.HostName, "")
extractReady(serviceTable, v, "Ready")
handleResult(serviceTable, v)
fmt.Println(serviceTable.Render())
return nil
},
},
{
Name: "up",
Usage: "up hostID",

View File

@ -75,13 +75,28 @@ func NewManager(config *option.Config, a *AlertingRulesManager) *Manager {
EvaluationInterval: model.Duration(time.Second * 30),
},
RuleFiles: []string{config.AlertingRulesFile},
AlertingConfig:AlertingConfig{
AlertmanagerConfigs:[]*AlertmanagerConfig{},
},
},
Registry: reg,
httpClient: client,
l: &sync.Mutex{},
a: a,
}
m.LoadConfig()
al := &AlertmanagerConfig{
ServiceDiscoveryConfig:ServiceDiscoveryConfig{
StaticConfigs:[]*Group{
{
Targets:config.AlertManagerUrl,
},
},
},
}
m.Config.AlertingConfig.AlertmanagerConfigs = append(m.Config.AlertingConfig.AlertmanagerConfigs, al)
m.SaveConfig()
m.a.InitRulesConfig()
return m

View File

@ -152,6 +152,13 @@ func NewRulesManager(config *option.Config) *AlertingRulesManager {
Labels: map[string]string{},
Annotations: map[string]string{"summary": "webcli unhealthy"},
},
&RulesConfig{
Alert: "WebcliUnhealthy",
Expr: "webcli_exporter_execute_command_failed > 100",
For: "3m",
Labels: map[string]string{},
Annotations: map[string]string{"summary": "The number of errors that occurred while executing the command was greater than 100."},
},
},
},
},

View File

@ -266,6 +266,23 @@ func PutLabel(w http.ResponseWriter, r *http.Request) {
//DownNode 节点下线,计算节点操作
func DownNode(w http.ResponseWriter, r *http.Request) {
nodeUID := strings.TrimSpace(chi.URLParam(r, "node_id"))
nodes, _ := nodeService.GetAllNode()
if nodes != nil && len(nodes) > 0 {
count := 0
for _, node := range nodes {
if node.Role.HasRule("manage") {
count++
}
}
if count < 2 {
err := utils.APIHandleError{
Code: 403,
Err: errors.New(fmt.Sprint("manage node less one, can not down it.")),
}
err.Handle(r, w)
return
}
}
logrus.Info("Node down by node api controller: ", nodeUID)
node, err := nodeService.DownNode(nodeUID)
if err != nil {

View File

@ -41,6 +41,7 @@ import (
"github.com/goodrain/rainbond/node/kubecache"
"github.com/goodrain/rainbond/node/nodem/client"
"github.com/goodrain/rainbond/util"
"encoding/json"
)
const (
@ -133,25 +134,38 @@ func (n *Cluster) checkNodeStatus() {
_, err := n.kubecli.GetNode(node.ID)
// delete the node in k8s if type is compute
if node.Role.HasRule(client.ComputeNode) && err == nil {
logrus.Infof("Node %s status is %v %d times and down it.",
logrus.Infof("Node %s status is %v %d times and can not scheduling.",
node.ID, ready, unhealthyCounter[node.ID])
err := n.kubecli.DownK8sNode(node.ID)
_, err := n.kubecli.CordonOrUnCordon(node.ID, true)
if err != nil {
logrus.Error("Failed to delete node in k8s: ", err)
}
n, err := n.kubecli.GetNode(node.ID)
fmt.Printf("======== deleted: %v, %v", err, n)
}
} else {
unhealthyCounter[node.ID]++
}
} else if ready {
resp, err := store.DefalutClient.Get("/rainbond/nodes/target/" + node.ID)
if err != nil {
logrus.Error(err)
continue
}
var targetNode client.HostNode
err = json.Unmarshal(resp.Kvs[0].Value, &targetNode)
if err != nil {
logrus.Error(err)
continue
}
if targetNode.NodeStatus.Status != Running {
logrus.Info("Skip open scheduling, because target node is: ", targetNode.NodeStatus.Status)
continue
}
unhealthyCounter[node.ID] = 0
_, err := n.kubecli.GetNode(node.ID)
_, err = n.kubecli.GetNode(node.ID)
// add the node into k8s if type is compute
if node.Role.HasRule(client.ComputeNode) && err != nil {
logrus.Infof("Node %s status is %v and up it.", node.ID, ready)
_, err := n.kubecli.UpK8sNode(node)
logrus.Infof("Node %s status is %v and can scheduling.", node.ID, ready)
_, err := n.kubecli.CordonOrUnCordon(node.ID, false)
if err != nil {
logrus.Error("Failed to add node into k8s: ", err)
}
@ -237,8 +251,11 @@ func (n *Cluster) GetNode(id string) *client.HostNode {
return nil
}
func (n *Cluster) handleNodeStatus(v *client.HostNode) {
logrus.Info("=====>node")
if v.Role.HasRule("compute") {
logrus.Info("=====>compute")
k8sNode, err := n.kubecli.GetNode(v.ID)
logrus.Info(k8sNode,"====k8sNode")
status := Running
if err != nil {
logrus.Infof("get k8s node error:%s", err.Error())
@ -246,16 +263,38 @@ func (n *Cluster) handleNodeStatus(v *client.HostNode) {
v.Status = status
v.NodeStatus.Status = status
v.Unschedulable = true
r := client.NodeCondition{
Type: client.NodeReady,
Status: client.ConditionFalse,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Message: "The node has been offline",
}
v.UpdataCondition(r)
return
}
v.Unschedulable = false
if k8sNode != nil {
logrus.Info(k8sNode.Spec.Unschedulable)
v.UpdataK8sCondition(k8sNode.Status.Conditions)
if v.Unschedulable == true || k8sNode.Spec.Unschedulable == true{
v.Unschedulable = true
}
}
if time.Now().Sub(v.UpTime) > time.Minute*2 {
status = Unknown
v.Status = status
v.NodeStatus.Status = status
v.Unschedulable = true
r := client.NodeCondition{
Type: client.NodeReady,
Status: client.ConditionFalse,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Message: "Node lost connection, state unknown",
}
v.UpdataCondition(r)
return
}
//var haveready bool
@ -300,6 +339,7 @@ func (n *Cluster) handleNodeStatus(v *client.HostNode) {
}
if v.Role.HasRule("manage") && !v.Role.HasRule("compute") { //manage install_success == runnint
logrus.Info("=====>manage")
if v.Status == Init || v.Status == InitSuccess || v.Status == InitFailed || v.Status == Installing {
return
}
@ -307,6 +347,14 @@ func (n *Cluster) handleNodeStatus(v *client.HostNode) {
if time.Now().Sub(v.UpTime) > time.Minute*2 {
v.Status = Unknown
v.NodeStatus.Status = Unknown
r := client.NodeCondition{
Type: client.NodeReady,
Status: client.ConditionFalse,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Message: "Node lost connection, state unknown",
}
v.UpdataCondition(r)
return
}
@ -340,6 +388,14 @@ func (n *Cluster) handleNodeStatus(v *client.HostNode) {
} else {
v.Status = Offline
v.NodeStatus.Status = Offline
r := client.NodeCondition{
Type: client.NodeReady,
Status: client.ConditionFalse,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Message: "The node has been offline",
}
v.UpdataCondition(r)
}
}
}

View File

@ -37,13 +37,19 @@ type ControllerSystemd struct {
conf *option.Conf
cluster client.ClusterClient
regBlock *regexp.Regexp
ServiceCli string
}
// At the stage you want to load the configurations of all rainbond components
// NewControllerSystemd creates a systemd-backed service controller.
// It resolves the absolute path of the systemctl binary once at
// construction time (via exec.LookPath) so that later
// Start/Stop/Restart/Enable/Disable calls use ServiceCli instead of a
// hard-coded /usr/bin/systemctl path.
func NewControllerSystemd(conf *option.Conf, cluster client.ClusterClient) *ControllerSystemd {
// Fail fast: without systemctl on PATH this controller cannot manage
// any service, so aborting at construction is deliberate.
cli, err := exec.LookPath("systemctl")
if err != nil {
panic(err)
}
return &ControllerSystemd{
conf: conf,
cluster: cluster,
ServiceCli: cli,
// Default directory where unit files for managed services are written.
SysConfigDir: "/etc/systemd/system",
}
}
@ -55,7 +61,7 @@ func (m *ControllerSystemd) CheckBeforeStart() bool {
}
func (m *ControllerSystemd) StartService(serviceName string) error {
err := exec.Command("/usr/bin/systemctl", "start", serviceName).Run()
err := exec.Command(m.ServiceCli, "start", serviceName).Run()
if err != nil {
logrus.Errorf("Start service %s: %v", serviceName, err)
return err
@ -64,7 +70,7 @@ func (m *ControllerSystemd) StartService(serviceName string) error {
}
func (m *ControllerSystemd) StopService(serviceName string) error {
err := exec.Command("/usr/bin/systemctl", "stop", serviceName).Run()
err := exec.Command(m.ServiceCli, "stop", serviceName).Run()
if err != nil {
logrus.Errorf("Stop service %s: %v", serviceName, err)
return err
@ -73,7 +79,7 @@ func (m *ControllerSystemd) StopService(serviceName string) error {
}
func (m *ControllerSystemd) RestartService(serviceName string) error {
err := exec.Command("/usr/bin/systemctl", "restart", serviceName).Run()
err := exec.Command(m.ServiceCli, "restart", serviceName).Run()
if err != nil {
logrus.Errorf("Restart service %s: %v", serviceName, err)
return err
@ -85,7 +91,7 @@ func (m *ControllerSystemd) RestartService(serviceName string) error {
func (m *ControllerSystemd) StartList(list []*service.Service) error {
logrus.Info("Starting all services.")
err := exec.Command("/usr/bin/systemctl", "start", "multi-user.target").Run()
err := exec.Command(m.ServiceCli, "start", "multi-user.target").Run()
if err != nil {
logrus.Errorf("Start target multi-user: %v", err)
return err
@ -97,7 +103,7 @@ func (m *ControllerSystemd) StartList(list []*service.Service) error {
func (m *ControllerSystemd) StopList(list []*service.Service) error {
logrus.Info("Stop all services.")
for _, s := range list {
err := exec.Command("/usr/bin/systemctl", "stop", s.Name).Run()
err := exec.Command(m.ServiceCli, "stop", s.Name).Run()
if err != nil {
logrus.Errorf("Enable service %s: %v", s.Name, err)
}
@ -108,7 +114,7 @@ func (m *ControllerSystemd) StopList(list []*service.Service) error {
func (m *ControllerSystemd) EnableService(name string) error {
logrus.Info("Enable service config by systemd.")
err := exec.Command("/usr/bin/systemctl", "enable", name).Run()
err := exec.Command(m.ServiceCli, "enable", name).Run()
if err != nil {
logrus.Errorf("Enable service %s: %v", name, err)
}
@ -118,7 +124,7 @@ func (m *ControllerSystemd) EnableService(name string) error {
func (m *ControllerSystemd) DisableService(name string) error {
logrus.Info("Disable service config by systemd.")
err := exec.Command("/usr/bin/systemctl", "disable", name).Run()
err := exec.Command(m.ServiceCli, "disable", name).Run()
if err != nil {
logrus.Errorf("Disable service %s: %v", name, err)
}
@ -141,6 +147,13 @@ func (m *ControllerSystemd) WriteConfig(s *service.Service) error {
return err
}
logrus.Info("Reload config for systemd by daemon-reload.")
err := exec.Command(m.ServiceCli, "daemon-reload").Run()
if err != nil {
logrus.Errorf("Reload config by systemd daemon-reload for %s: %v ", s.Name, err)
return err
}
return nil
}
@ -152,5 +165,12 @@ func (m *ControllerSystemd) RemoveConfig(name string) error {
os.Remove(fileName)
}
logrus.Info("Reload config for systemd by daemon-reload.")
err = exec.Command(m.ServiceCli, "daemon-reload").Run()
if err != nil {
logrus.Errorf("Reload config by systemd daemon-reload for %s: %v ", name, err)
return err
}
return nil
}

View File

@ -30,6 +30,7 @@ import (
"io/ioutil"
"os/exec"
"time"
"reflect"
)
type ManagerService struct {
@ -96,6 +97,7 @@ func (m *ManagerService) Online() error {
return fmt.Errorf("check environments is not passed")
}
// start all by systemctl start multi-user.target
m.ctr.StartList(m.services)
m.StartSyncService()
@ -128,18 +130,6 @@ func (m *ManagerService) Offline() error {
return nil
}
func (m *ManagerService) ReloadService() {
services, err := service.LoadServicesFromLocal(m.conf.ServiceListFile)
if err != nil {
logrus.Error("Failed to reload all services: ", err)
return
}
m.services = services
m.Online()
}
// synchronize all service status to as we expect
func (m *ManagerService) StartSyncService() {
m.syncCtx, m.syncCancel = context.WithCancel(context.Background())
@ -224,12 +214,43 @@ func (m *ManagerService) WaitStart(name string, duration time.Duration) bool {
}
/*
1. reload services config from local file system
2. regenerate systemd config for all service
3. start all services of status is not running
1. reload services info from local file system
2. regenerate systemd config file and restart with config changes
3. start all newly added services
*/
func (m *ManagerService) ReLoadServices() error {
return m.Online()
services, err := service.LoadServicesFromLocal(m.conf.ServiceListFile)
if err != nil {
logrus.Error("Failed to reload all services: ", err)
return err
}
for _, ne := range services {
exists := false
for _, old := range m.services {
if ne.Name == old.Name {
if !reflect.DeepEqual(ne, old) {
logrus.Infof("Recreate service [%s]", ne.Name)
if err := m.ctr.WriteConfig(ne); err == nil {
m.ctr.EnableService(ne.Name)
m.ctr.RestartService(ne.Name)
}
}
exists = true
break
}
}
if !exists {
logrus.Infof("Create service [%s]", ne.Name)
if err := m.ctr.WriteConfig(ne); err == nil {
m.ctr.EnableService(ne.Name)
m.ctr.StartService(ne.Name)
}
}
}
m.services = services
return nil
}
func (m *ManagerService) WriteServices() error {
@ -291,8 +312,11 @@ func StartRequiresSystemd(conf *option.Conf) error {
logrus.Error("Failed to load all services: ", err)
return err
}
err = exec.Command("/usr/bin/systemctl", "start", "docker").Run()
cli, err := exec.LookPath("systemctl")
if err != nil {
panic(err)
}
err = exec.Command(cli, "start", "docker").Run()
if err != nil {
fmt.Printf("Start docker daemon: %v", err)
return err
@ -312,7 +336,7 @@ func StartRequiresSystemd(conf *option.Conf) error {
fmt.Printf("Generate config file %s: %v", fileName, err)
return err
}
err = exec.Command("/usr/bin/systemctl", "start", s.Name).Run()
err = exec.Command(cli, "start", s.Name).Run()
if err != nil {
fmt.Printf("Start service %s: %v", s.Name, err)
return err

View File

@ -193,6 +193,7 @@ func (p *probeManager) Stop() error {
// CloseWatch closes the status channel of the watcher registered under
// (serviceName, id) and removes it from the watch table so the entry is
// not closed twice and does not leak.
//
// Unlike the original, it tolerates an unknown serviceName or id:
// indexing a missing key would otherwise yield a zero-value watcher and
// the statusChan access (or the close of a nil channel) would panic.
func (p *probeManager) CloseWatch(serviceName string, id string) error {
	watchers, ok := p.watches[serviceName]
	if !ok {
		return nil
	}
	w, ok := watchers[id]
	if !ok {
		return nil
	}
	close(w.statusChan)
	// Drop the entry so a repeated CloseWatch is a no-op rather than a
	// close-of-closed-channel panic.
	delete(watchers, id)
	return nil
}
func (p *probeManager) GetServiceHealthy(serviceName string) (*service.HealthStatus, bool) {