Merge branch 'master' into gc

This commit is contained in:
黄润豪 2019-12-03 23:04:06 +08:00 committed by GitHub
commit c6df516031
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2462 changed files with 2584734 additions and 1305 deletions

View File

@ -10,12 +10,7 @@
**Rainbond** 企业应用云操作系统
Rainbond云帮是云原生下企业应用操作系统。 Rainbond支撑企业应用的开发、架构、交付和运维的全流程通过“无侵入”架构无缝衔接各类企业应用底层资源可以对接和管理IaaS、虚拟机和物理服务器。
```
企业应用包括:
各类信息系统、OA、CRM、ERP、数据库、大数据、物联网、互联网平台、微服务架构等运行在企业内部的各种系统。
```
Rainbond是以应用为中心面向云原生的企业应用操作系统将企业数字资产抽象为应用模型Rainbond Application Model并通过容器化环境、敏捷DevOps、微服务架构、组件制品库、serverless部署、底层资源自动化运维等核心技术通过搭建数字中台、加速前台业务需求响应帮助企业获得面向未来的创新能力和竞争力。
<img src="https://grstatic.oss-cn-shanghai.aliyuncs.com/images/Rainbond%E4%BA%A7%E5%93%81%E6%9E%B6%E6%9E%84.png" width="100%">
@ -45,8 +40,12 @@ DevOps开发流水线、微服务架构、服务治理及各类技术工具“
| Serverless PaaS | 以应用为核心使用过程不需要了解服务器相关概念简单灵活。通过对接行业应用快速构建行业专有PaaS。 |
| 应用网关 | 基于HTTP、HTTPs、TCP、UDP等协议应用访问控制策略轻松操作应用灰度发布、A/B测试。 |
| 异构服务统一管理 | 支持集群内外不同架构服务统一管理、监控和通信治理。 |
| 应用描述模型 | 以应用为中心描述应用包含的组件特性,应用特性,部署运维特性,实现复杂应用的标准版交付 |
更多功能特性详见:
[Rainbond功能特性说明](https://www.rainbond.com/docs/quick-start/edition/)
[Rainbond开发计划](https://www.rainbond.com/docs/quick-start/roadmap/)
更多功能特性详见: [Rainbond功能特性说明](https://www.rainbond.com/docs/quick-start/edition/)
## 快速开始
1. [快速安装 Rainbond 集群](https://www.rainbond.com/docs/quick-start/rainbond_install/)

View File

@ -71,6 +71,8 @@ type ServiceInterface interface {
ShareResult(w http.ResponseWriter, r *http.Request)
BuildVersionInfo(w http.ResponseWriter, r *http.Request)
GetDeployVersion(w http.ResponseWriter, r *http.Request)
AutoscalerRules(w http.ResponseWriter, r *http.Request)
ScalingRecords(w http.ResponseWriter, r *http.Request)
}
//TenantInterfaceWithV1 funcs for both v2 and v1
@ -105,7 +107,7 @@ type PluginInterface interface {
PluginAction(w http.ResponseWriter, r *http.Request)
PluginDefaultENV(w http.ResponseWriter, r *http.Request)
PluginBuild(w http.ResponseWriter, r *http.Request)
GetAllPluginBuildVersons(w http.ResponseWriter, r *http.Request)
GetAllPluginBuildVersions(w http.ResponseWriter, r *http.Request)
GetPluginBuildVersion(w http.ResponseWriter, r *http.Request)
DeletePluginBuildVersion(w http.ResponseWriter, r *http.Request)
//plugin

View File

@ -36,7 +36,7 @@ func (v2 *V2) pluginRouter() chi.Router {
r.Delete("/", controller.GetManager().PluginAction)
r.Post("/build", controller.GetManager().PluginBuild)
//get all build versions of this plugin
r.Get("/build-version", controller.GetManager().GetAllPluginBuildVersons)
r.Get("/build-version", controller.GetManager().GetAllPluginBuildVersions)
r.Get("/build-version/{version_id}", controller.GetManager().GetPluginBuildVersion)
r.Delete("/build-version/{version_id}", controller.GetManager().DeletePluginBuildVersion)
return r

View File

@ -247,6 +247,11 @@ func (v2 *V2) serviceRouter() chi.Router {
r.Get("/pods/{pod_name}/detail", controller.GetManager().PodDetail)
// autoscaler
r.Post("/xparules", middleware.WrapEL(controller.GetManager().AutoscalerRules, dbmodel.TargetTypeService, "add-app-autoscaler-rule", dbmodel.SYNEVENTTYPE))
r.Put("/xparules", middleware.WrapEL(controller.GetManager().AutoscalerRules, dbmodel.TargetTypeService, "update-app-autoscaler-rule", dbmodel.SYNEVENTTYPE))
r.Get("/xparecords", controller.GetManager().ScalingRecords)
return r
}

View File

@ -0,0 +1,130 @@
// Copyright (C) 2014-2018 Goodrain Co., Ltd.
// RAINBOND, Application Management Platform
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package controller
import (
"net/http"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/jinzhu/gorm"
"github.com/goodrain/rainbond/api/handler"
"github.com/goodrain/rainbond/api/middleware"
"github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/db/errors"
httputil "github.com/goodrain/rainbond/util/http"
)
// AutoscalerRules dispatches autoscaler-rule requests by HTTP method:
// POST creates a rule, PUT updates one. Other methods write no response.
func (t *TenantStruct) AutoscalerRules(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		t.addAutoscalerRule(w, r)
		return
	}
	if r.Method == "PUT" {
		t.updAutoscalerRule(w, r)
	}
}
// addAutoscalerRule decodes and validates the request body, binds the
// service id from the request context, and persists a new autoscaler rule.
func (t *TenantStruct) addAutoscalerRule(w http.ResponseWriter, r *http.Request) {
	var req model.AutoscalerRuleReq
	if !httputil.ValidatorRequestStructAndErrorResponse(r, w, &req, nil) {
		return
	}

	// the target service is placed in the request context by middleware
	req.ServiceID = r.Context().Value(middleware.ContextKey("service_id")).(string)

	err := handler.GetServiceManager().AddAutoscalerRule(&req)
	if err == nil {
		httputil.ReturnSuccess(r, w, nil)
		return
	}
	if err == errors.ErrRecordAlreadyExist {
		httputil.ReturnError(r, w, 400, err.Error())
		return
	}
	logrus.Errorf("add autoscaler rule: %v", err)
	httputil.ReturnError(r, w, 500, err.Error())
}
// updAutoscalerRule decodes and validates the request body and updates an
// existing autoscaler rule, mapping known errors to 400/404 responses.
func (t *TenantStruct) updAutoscalerRule(w http.ResponseWriter, r *http.Request) {
	var req model.AutoscalerRuleReq
	if !httputil.ValidatorRequestStructAndErrorResponse(r, w, &req, nil) {
		return
	}

	err := handler.GetServiceManager().UpdAutoscalerRule(&req)
	switch {
	case err == nil:
		httputil.ReturnSuccess(r, w, nil)
	case err == errors.ErrRecordAlreadyExist:
		httputil.ReturnError(r, w, 400, err.Error())
	case err == gorm.ErrRecordNotFound:
		httputil.ReturnError(r, w, 404, err.Error())
	default:
		logrus.Errorf("update autoscaler rule: %v", err)
		httputil.ReturnError(r, w, 500, err.Error())
	}
}
// ScalingRecords serves read-only access to a service's scaling records.
// Only GET is handled; other verbs write no response.
func (t *TenantStruct) ScalingRecords(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		t.listScalingRecords(w, r)
	}
}
// listScalingRecords returns one page of scaling records for the service
// identified by the service_id stored in the request context.
// Invalid or missing pagination parameters fall back to page=1, page_size=10.
func (t *TenantStruct) listScalingRecords(w http.ResponseWriter, r *http.Request) {
	query := r.URL.Query()

	pageStr := query.Get("page")
	page, err := strconv.Atoi(pageStr)
	if err != nil {
		logrus.Warningf("convert '%s(pageStr)' to int: %v", pageStr, err)
	}
	if page <= 0 {
		page = 1
	}

	pageSizeStr := query.Get("page_size")
	pageSize, err := strconv.Atoi(pageSizeStr)
	if err != nil {
		logrus.Warningf("convert '%s(pageSizeStr)' to int: %v", pageSizeStr, err)
	}
	if pageSize <= 0 {
		pageSize = 10
	}

	serviceID := r.Context().Value(middleware.ContextKey("service_id")).(string)
	records, count, err := handler.GetServiceManager().ListScalingRecords(serviceID, page, pageSize)
	if err != nil {
		logrus.Errorf("list scaling rule: %v", err)
		httputil.ReturnError(r, w, 500, err.Error())
		return
	}
	httputil.ReturnSuccess(r, w, map[string]interface{}{
		"total": count,
		"data":  records,
	})
}

View File

@ -22,10 +22,11 @@ import (
"net/http"
"github.com/go-chi/chi"
"github.com/jinzhu/gorm"
"github.com/goodrain/rainbond/api/handler"
"github.com/goodrain/rainbond/api/handler/group"
"github.com/goodrain/rainbond/api/middleware"
httputil "github.com/goodrain/rainbond/util/http"
)
@ -117,9 +118,14 @@ func GetBackup(w http.ResponseWriter, r *http.Request) {
//DeleteBackup delete backup
func DeleteBackup(w http.ResponseWriter, r *http.Request) {
backupID := chi.URLParam(r, "backup_id")
err := handler.GetAPPBackupHandler().DeleteBackup(backupID)
if err != nil {
err.Handle(r, w)
if err == gorm.ErrRecordNotFound {
httputil.ReturnError(r, w, 404, "not found")
return
}
httputil.ReturnError(r, w, 500, err.Error())
return
}
httputil.ReturnSuccess(r, w, nil)

View File

@ -290,7 +290,7 @@ func (t *TenantStruct) PluginBuild(w http.ResponseWriter, r *http.Request) {
httputil.ReturnSuccess(r, w, pbv)
}
//GetAllPluginBuildVersons 获取该插件所有的构建版本
//GetAllPluginBuildVersions 获取该插件所有的构建版本
// swagger:operation GET /v2/tenants/{tenant_name}/plugin/{plugin_id}/build-version v2 allPluginVersions
//
// 获取所有的构建版本信息
@ -311,7 +311,7 @@ func (t *TenantStruct) PluginBuild(w http.ResponseWriter, r *http.Request) {
// schema:
// "$ref": "#/responses/commandResponse"
// description: 统一返回格式
func (t *TenantStruct) GetAllPluginBuildVersons(w http.ResponseWriter, r *http.Request) {
func (t *TenantStruct) GetAllPluginBuildVersions(w http.ResponseWriter, r *http.Request) {
pluginID := r.Context().Value(middleware.ContextKey("plugin_id")).(string)
versions, err := handler.GetPluginManager().GetAllPluginBuildVersions(pluginID)
if err != nil {

View File

@ -546,7 +546,23 @@ func (t *TenantStruct) GetTenants(w http.ResponseWriter, r *http.Request) {
//DeleteTenant deletes the tenant identified by the tenant_id in the request
//context. A tenant that still owns services or plugins is rejected with 400;
//an unknown tenant yields 404; any other failure yields 500.
//
// BUG FIX: the original wrote an unconditional 400 ("this rainbond version can
// not support delete tenant") and then KEPT EXECUTING, performing the delete
// and writing a second response (superfluous WriteHeader). The stray guard is
// removed so exactly one response is written per request.
func (t *TenantStruct) DeleteTenant(w http.ResponseWriter, r *http.Request) {
	tenantID := r.Context().Value(middleware.ContextKey("tenant_id")).(string)
	if err := handler.GetTenantManager().DeleteTenant(tenantID); err != nil {
		if err == handler.ErrTenantStillHasServices || err == handler.ErrTenantStillHasPlugins {
			httputil.ReturnError(r, w, 400, err.Error())
			return
		}
		if err == gorm.ErrRecordNotFound {
			httputil.ReturnError(r, w, 404, err.Error())
			return
		}
		httputil.ReturnError(r, w, 500, fmt.Sprintf("delete tenant: %v", err))
		return
	}
	httputil.ReturnSuccess(r, w, nil)
}
//UpdateTenant UpdateTenant

View File

@ -282,6 +282,7 @@ func (t *TenantStruct) HorizontalService(w http.ResponseWriter, r *http.Request)
TenantID: tenantID,
ServiceID: serviceID,
EventID: sEvent.EventID,
Username: sEvent.UserName,
Replicas: replicas,
}

View File

@ -134,7 +134,7 @@ func (g *GatewayAction) UpdateHTTPRule(req *apimodel.UpdateHTTPRuleStruct) (stri
tx.Rollback()
return "", err
}
if rule == nil {
if rule == nil || rule.UUID == "" { // rule won't be nil
tx.Rollback()
return "", fmt.Errorf("HTTPRule dosen't exist based on uuid(%s)", req.HTTPRuleID)
}
@ -156,6 +156,8 @@ func (g *GatewayAction) UpdateHTTPRule(req *apimodel.UpdateHTTPRuleStruct) (stri
return "", err
}
rule.CertificateID = req.CertificateID
} else {
rule.CertificateID = ""
}
if len(req.RuleExtensions) > 0 {
// delete old RuleExtensions

View File

@ -28,6 +28,9 @@ import (
"github.com/Sirupsen/logrus"
"github.com/coreos/etcd/clientv3"
"github.com/jinzhu/gorm"
"github.com/pquerna/ffjson/ffjson"
"github.com/goodrain/rainbond/api/util"
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
@ -36,8 +39,6 @@ import (
core_util "github.com/goodrain/rainbond/util"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/goodrain/rainbond/worker/client"
"github.com/jinzhu/gorm"
"github.com/pquerna/ffjson/ffjson"
)
//Backup GroupBackup
@ -51,24 +52,18 @@ type Backup struct {
GroupID string `json:"group_id" validate:"group_name|required"`
Metadata string `json:"metadata,omitempty" validate:"metadata|required"`
ServiceIDs []string `json:"service_ids" validate:"service_ids|required"`
Mode string `json:"mode" validate:"mode|required|in:full-online,full-offline"`
Version string `json:"version" validate:"version|required"`
SlugInfo struct {
Namespace string `json:"namespace"`
FTPHost string `json:"ftp_host"`
FTPPort string `json:"ftp_port"`
FTPUser string `json:"ftp_username"`
FTPPassword string `json:"ftp_password"`
} `json:"slug_info,omitempty"`
ImageInfo struct {
HubURL string `json:"hub_url"`
HubUser string `json:"hub_user"`
HubPassword string `json:"hub_password"`
Namespace string `json:"namespace"`
IsTrust bool `json:"is_trust,omitempty"`
} `json:"image_info,omitempty"`
SourceDir string `json:"source_dir"`
BackupID string `json:"backup_id,omitempty"`
SourceDir string `json:"source_dir"`
BackupID string `json:"backup_id,omitempty"`
Mode string `json:"mode" validate:"mode|required|in:full-online,full-offline"`
S3Config struct {
Provider string `json:"provider"`
Endpoint string `json:"endpoint"`
AccessKey string `json:"access_key"`
SecretKey string `json:"secret_key"`
BucketName string `json:"bucket_name"`
} `json:"s3_config"`
}
}
@ -112,7 +107,9 @@ func (h *BackupHandle) NewBackup(b Backup) (*dbmodel.AppBackup, *util.APIHandleE
appBackup.SourceDir = sourceDir
//snapshot the app metadata of region and write
if err := h.snapshot(b.Body.ServiceIDs, sourceDir); err != nil {
os.RemoveAll(sourceDir)
if err := os.RemoveAll(sourceDir); err != nil {
logrus.Warningf("error removing %s: %v", sourceDir, err)
}
if strings.HasPrefix(err.Error(), "Statefulset app must be closed") {
return nil, util.CreateAPIHandleError(401, fmt.Errorf("snapshot group apps error,%s", err))
}
@ -154,21 +151,29 @@ func (h *BackupHandle) GetBackup(backupID string) (*dbmodel.AppBackup, *util.API
}
//DeleteBackup delete backup
func (h *BackupHandle) DeleteBackup(backupID string) *util.APIHandleError {
backup, err := h.GetBackup(backupID)
func (h *BackupHandle) DeleteBackup(backupID string) error {
backup, err := db.GetManager().AppBackupDao().GetAppBackup(backupID)
if err != nil {
return err
}
//if status != success it could be deleted
//if status == success, backup mode must be offline could be deleted
if backup.Status != "success" || backup.BackupMode == "full-offline" {
backup.Deleted = true
if er := db.GetManager().AppBackupDao().UpdateModel(backup); er != nil {
return util.CreateAPIHandleErrorFromDBError("delete backup error", er)
}
return nil
tx := db.GetManager().Begin()
defer db.GetManager().EnsureEndTransactionFunc()(tx)
if err := db.GetManager().AppBackupDaoTransactions(tx).DeleteAppBackup(backupID); err != nil {
tx.Rollback()
return fmt.Errorf("delete backup error: %v", err)
}
return util.CreateAPIHandleErrorf(400, "backup success do not support delete.")
if backup.BackupMode == "full-offline" {
logrus.Infof("delete from local: %s", backup.SourceDir)
if err := os.RemoveAll(backup.SourceDir); err != nil {
tx.Rollback()
return fmt.Errorf("remove backup directory: %v", err)
}
}
return tx.Commit().Error
}
//GetBackupByGroupID get some backup info by group id
@ -340,20 +345,6 @@ func (h *BackupHandle) snapshot(ids []string, sourceDir string) error {
type BackupRestore struct {
BackupID string `json:"backup_id"`
Body struct {
SlugInfo struct {
Namespace string `json:"namespace"`
FTPHost string `json:"ftp_host"`
FTPPort string `json:"ftp_port"`
FTPUser string `json:"ftp_username"`
FTPPassword string `json:"ftp_password"`
} `json:"slug_info,omitempty"`
ImageInfo struct {
HubURL string `json:"hub_url"`
HubUser string `json:"hub_user"`
HubPassword string `json:"hub_password"`
Namespace string `json:"namespace"`
IsTrust bool `json:"is_trust,omitempty"`
} `json:"image_info,omitempty"`
EventID string `json:"event_id"`
//need restore target tenant id
TenantID string `json:"tenant_id"`
@ -361,6 +352,14 @@ type BackupRestore struct {
//RestoreMode(cdot) current datacenter and other tenant
//RestoreMode(od) other datacenter
RestoreMode string `json:"restore_mode"`
S3Config struct {
Provider string `json:"provider"`
Endpoint string `json:"endpoint"`
AccessKey string `json:"access_key"`
SecretKey string `json:"secret_key"`
BucketName string `json:"bucket_name"`
} `json:"s3_config"`
}
}
@ -403,12 +402,11 @@ func (h *BackupHandle) RestoreBackup(br BackupRestore) (*RestoreResult, *util.AP
}
restoreID = core_util.NewUUID()
var dataMap = map[string]interface{}{
"slug_info": br.Body.SlugInfo,
"image_info": br.Body.ImageInfo,
"backup_id": backup.BackupID,
"tenant_id": br.Body.TenantID,
"restore_id": restoreID,
"restore_mode": br.Body.RestoreMode,
"s3_config": br.Body.S3Config,
}
err := h.mqcli.SendBuilderTopic(mqclient.TaskStruct{
TaskBody: dataMap,

View File

@ -36,6 +36,7 @@ import (
"github.com/goodrain/rainbond/builder/parser"
"github.com/goodrain/rainbond/cmd/api/option"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/errors"
core_model "github.com/goodrain/rainbond/db/model"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
@ -519,9 +520,17 @@ func (s *ServiceAction) ServiceCreate(sc *api_model.ServiceStruct) error {
env.ServiceID = ts.ServiceID
env.TenantID = ts.TenantID
if err := db.GetManager().TenantServiceEnvVarDaoTransactions(tx).AddModel(&env); err != nil {
logrus.Errorf("add env %v error, %v", env.AttrName, err)
tx.Rollback()
return err
logrus.Errorf("add env[name=%s] error, %v", env.AttrName, err)
if err != errors.ErrRecordAlreadyExist {
tx.Rollback()
return err
}
logrus.Warningf("recover env[name=%s]", env.AttrName)
// if env already exists, update it
if err = db.GetManager().TenantServiceEnvVarDaoTransactions(tx).UpdateModel(&env); err != nil {
tx.Rollback()
return err
}
}
}
}
@ -1982,6 +1991,127 @@ func (s *ServiceAction) ListVersionInfo(serviceID string) (*api_model.BuildListR
return result, nil
}
// AddAutoscalerRule creates an autoscaler rule and its metrics in a single
// transaction, then asks the worker (via the "refreshhpa" task) to apply it.
//
// BUG FIX: `defer db.GetManager().EnsureEndTransactionFunc()` only deferred
// the factory call and discarded the returned closure, so the transaction
// guard never ran; it must be invoked with tx: `EnsureEndTransactionFunc()(tx)`
// (as done elsewhere in this codebase). Also roll back instead of leaking the
// open transaction when the MQ notification fails.
func (s *ServiceAction) AddAutoscalerRule(req *api_model.AutoscalerRuleReq) error {
	tx := db.GetManager().Begin()
	defer db.GetManager().EnsureEndTransactionFunc()(tx)

	r := &dbmodel.TenantServiceAutoscalerRules{
		RuleID:      req.RuleID,
		ServiceID:   req.ServiceID,
		Enable:      req.Enable,
		XPAType:     req.XPAType,
		MinReplicas: req.MinReplicas,
		MaxReplicas: req.MaxReplicas,
	}
	if err := db.GetManager().TenantServceAutoscalerRulesDaoTransactions(tx).AddModel(r); err != nil {
		tx.Rollback()
		return err
	}

	for _, metric := range req.Metrics {
		m := &dbmodel.TenantServiceAutoscalerRuleMetrics{
			RuleID:            req.RuleID,
			MetricsType:       metric.MetricsType,
			MetricsName:       metric.MetricsName,
			MetricTargetType:  metric.MetricTargetType,
			MetricTargetValue: metric.MetricTargetValue,
		}
		if err := db.GetManager().TenantServceAutoscalerRuleMetricsDaoTransactions(tx).AddModel(m); err != nil {
			tx.Rollback()
			return err
		}
	}

	taskbody := map[string]interface{}{
		"service_id": r.ServiceID,
		"rule_id":    r.RuleID,
	}
	if err := s.MQClient.SendBuilderTopic(gclient.TaskStruct{
		TaskType: "refreshhpa",
		TaskBody: taskbody,
		Topic:    gclient.WorkerTopic,
	}); err != nil {
		// do not leave the transaction open when the notification fails
		tx.Rollback()
		logrus.Errorf("send 'refreshhpa' task: %v", err)
		return err
	}
	logrus.Infof("rule id: %s; successfully send 'refreshhpa' task.", r.RuleID)

	return tx.Commit().Error
}
// UpdAutoscalerRule updates an existing autoscaler rule (looked up by rule
// id) and replaces its metrics wholesale in a single transaction, then asks
// the worker (via the "refreshhpa" task) to apply the change.
//
// BUG FIX: `defer db.GetManager().EnsureEndTransactionFunc()` only deferred
// the factory call and discarded the returned closure, so the transaction
// guard never ran; it must be invoked with tx: `EnsureEndTransactionFunc()(tx)`.
// Also roll back instead of leaking the open transaction when the MQ
// notification fails.
func (s *ServiceAction) UpdAutoscalerRule(req *api_model.AutoscalerRuleReq) error {
	rule, err := db.GetManager().TenantServceAutoscalerRulesDao().GetByRuleID(req.RuleID)
	if err != nil {
		return err
	}
	rule.Enable = req.Enable
	rule.XPAType = req.XPAType
	rule.MinReplicas = req.MinReplicas
	rule.MaxReplicas = req.MaxReplicas

	tx := db.GetManager().Begin()
	defer db.GetManager().EnsureEndTransactionFunc()(tx)

	if err := db.GetManager().TenantServceAutoscalerRulesDaoTransactions(tx).UpdateModel(rule); err != nil {
		tx.Rollback()
		return err
	}

	// replace metrics: delete the old set, then insert the requested set
	if err := db.GetManager().TenantServceAutoscalerRuleMetricsDaoTransactions(tx).DeleteByRuleID(req.RuleID); err != nil {
		tx.Rollback()
		return err
	}
	for _, metric := range req.Metrics {
		m := &dbmodel.TenantServiceAutoscalerRuleMetrics{
			RuleID:            req.RuleID,
			MetricsType:       metric.MetricsType,
			MetricsName:       metric.MetricsName,
			MetricTargetType:  metric.MetricTargetType,
			MetricTargetValue: metric.MetricTargetValue,
		}
		if err := db.GetManager().TenantServceAutoscalerRuleMetricsDaoTransactions(tx).AddModel(m); err != nil {
			tx.Rollback()
			return err
		}
	}

	taskbody := map[string]interface{}{
		"service_id": rule.ServiceID,
		"rule_id":    rule.RuleID,
	}
	if err := s.MQClient.SendBuilderTopic(gclient.TaskStruct{
		TaskType: "refreshhpa",
		TaskBody: taskbody,
		Topic:    gclient.WorkerTopic,
	}); err != nil {
		// do not leave the transaction open when the notification fails
		tx.Rollback()
		logrus.Errorf("send 'refreshhpa' task: %v", err)
		return err
	}
	logrus.Infof("rule id: %s; successfully send 'refreshhpa' task.", rule.RuleID)

	return tx.Commit().Error
}
// ListScalingRecords returns one page of scaling records for the given
// service together with the total record count.
func (s *ServiceAction) ListScalingRecords(serviceID string, page, pageSize int) ([]*dbmodel.TenantServiceScalingRecords, int, error) {
	dao := db.GetManager().TenantServiceScalingRecordsDao()
	offset := (page - 1) * pageSize

	records, err := dao.ListByServiceID(serviceID, offset, pageSize)
	if err != nil {
		return nil, 0, err
	}

	count, err := dao.CountByServiceID(serviceID)
	if err != nil {
		return nil, 0, err
	}
	return records, count, nil
}
//TransStatus trans service status
func TransStatus(eStatus string) string {
switch eStatus {

View File

@ -73,4 +73,8 @@ type ServiceHandler interface {
GetServiceCheckInfo(uuid string) (*exector.ServiceCheckResult, *util.APIHandleError)
GetServiceDeployInfo(tenantID, serviceID string) (*pb.DeployInfo, *util.APIHandleError)
ListVersionInfo(serviceID string) (*api_model.BuildListRespVO, error)
AddAutoscalerRule(req *api_model.AutoscalerRuleReq) error
UpdAutoscalerRule(req *api_model.AutoscalerRuleReq) error
ListScalingRecords(serviceID string, page, pageSize int) ([]*dbmodel.TenantServiceScalingRecords, int, error)
}

View File

@ -32,23 +32,23 @@ import (
"github.com/goodrain/rainbond/cmd/api/option"
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/mq/api/grpc/pb"
mqclient "github.com/goodrain/rainbond/mq/client"
cli "github.com/goodrain/rainbond/node/nodem/client"
"github.com/goodrain/rainbond/worker/client"
)
//TenantAction tenant act
type TenantAction struct {
MQClient pb.TaskQueueClient
MQClient mqclient.MQClient
statusCli *client.AppRuntimeSyncClient
OptCfg *option.Config
}
//CreateTenManager create Manger
func CreateTenManager(MQClient pb.TaskQueueClient, statusCli *client.AppRuntimeSyncClient,
func CreateTenManager(mqc mqclient.MQClient, statusCli *client.AppRuntimeSyncClient,
optCfg *option.Config) *TenantAction {
return &TenantAction{
MQClient: MQClient,
MQClient: mqc,
statusCli: statusCli,
OptCfg: optCfg,
}
@ -104,6 +104,53 @@ func (t *TenantAction) UpdateTenant(tenant *dbmodel.Tenants) error {
return db.GetManager().TenantDao().UpdateModel(tenant)
}
// DeleteTenant deletes tenant based on the given tenantID.
//
// A tenant can only be deleted when it no longer owns any service or plugin;
// otherwise ErrTenantStillHasServices / ErrTenantStillHasPlugins is returned.
// On success the tenant is marked as deleting and a "delete_tenant" task is
// sent to the worker to remove the namespace in k8s.
func (t *TenantAction) DeleteTenant(tenantID string) error {
	// refuse to delete while services still exist
	services, err := db.GetManager().TenantServiceDao().ListServicesByTenantID(tenantID)
	if err != nil {
		return err
	}
	if len(services) > 0 {
		return ErrTenantStillHasServices
	}

	// refuse to delete while plugins still exist
	plugins, err := db.GetManager().TenantPluginDao().ListByTenantID(tenantID)
	if err != nil {
		return err
	}
	if len(plugins) > 0 {
		return ErrTenantStillHasPlugins
	}

	// mark the tenant as deleting
	tenant, err := db.GetManager().TenantDao().GetTenantByUUID(tenantID)
	if err != nil {
		return err
	}
	tenant.Status = dbmodel.TenantStatusDeleting.String()
	if err = db.GetManager().TenantDao().UpdateModel(tenant); err != nil {
		return err
	}

	// delete namespace in k8s
	task := mqclient.TaskStruct{
		TaskType: "delete_tenant",
		Topic:    mqclient.WorkerTopic,
		TaskBody: map[string]string{
			"tenant_id": tenantID,
		},
	}
	if err = t.MQClient.SendBuilderTopic(task); err != nil {
		logrus.Error("send task 'delete tenant'", err)
		return err
	}
	return nil
}
//TotalMemCPU StatsMemCPU
func (t *TenantAction) TotalMemCPU(services []*dbmodel.TenantServices) (*api_model.StatsInfo, error) {
cpus := 0

View File

@ -44,4 +44,5 @@ type TenantHandler interface {
IsClosedStatus(status string) bool
BindTenantsResource(source []*dbmodel.Tenants) api_model.TenantList
UpdateTenant(*dbmodel.Tenants) error
DeleteTenant(tenantID string) error
}

28
api/handler/types.go Normal file
View File

@ -0,0 +1,28 @@
// Copyright (C) 2014-2018 Goodrain Co., Ltd.
// RAINBOND, Application Management Platform
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package handler
import "errors"
// Sentinel errors returned by tenant deletion; callers compare against them
// to decide the HTTP status (both are visible being mapped to 400 in the
// DeleteTenant handler in this commit).
var (
	// ErrTenantStillHasServices indicates the tenant still owns services and
	// therefore cannot be deleted yet.
	ErrTenantStillHasServices = errors.New("tenant still has services")
	// ErrTenantStillHasPlugins indicates the tenant still owns plugins and
	// therefore cannot be deleted yet.
	ErrTenantStillHasPlugins = errors.New("tenant still has plugins")
)

51
api/model/autoscaler.go Normal file
View File

@ -0,0 +1,51 @@
// Copyright (C) 2014-2018 Goodrain Co., Ltd.
// RAINBOND, Application Management Platform
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package model
// AutoscalerRuleReq is the request body for creating or updating an
// autoscaler (XPA) rule for a service.
type AutoscalerRuleReq struct {
	RuleID string `json:"rule_id" validate:"rule_id|required"`
	// ServiceID is not part of the JSON body; the API layer fills it in from
	// the request context.
	ServiceID   string
	Enable      bool   `json:"enable" validate:"enable|required"`
	XPAType     string `json:"xpa_type" validate:"xpa_type|required"`
	MinReplicas int    `json:"min_replicas" validate:"min_replicas|required"`
	// BUG FIX: validate tag was "min_replicas|required" (copy-paste from the
	// field above); corrected to validate max_replicas.
	MaxReplicas int `json:"max_replicas" validate:"max_replicas|required"`
	Metrics     []struct {
		MetricsType       string `json:"metric_type"`
		MetricsName       string `json:"metric_name"`
		MetricTargetType  string `json:"metric_target_type"`
		MetricTargetValue int    `json:"metric_target_value"`
	} `json:"metrics"`
}
// AutoscalerRuleResp is the response body describing an autoscaler (XPA)
// rule and its metrics for a service.
type AutoscalerRuleResp struct {
	RuleID      string `json:"rule_id"`
	ServiceID   string `json:"service_id"`
	Enable      bool   `json:"enable"`
	XPAType     string `json:"xpa_type"`
	MinReplicas int    `json:"min_replicas"`
	MaxReplicas int    `json:"max_replicas"`
	// NOTE(review): the JSON keys are singular ("metric_type") while the Go
	// field names are plural ("MetricsType") — mirrors the request struct.
	Metrics []struct {
		MetricsType       string `json:"metric_type"`
		MetricsName       string `json:"metric_name"`
		MetricTargetType  string `json:"metric_target_type"`
		MetricTargetValue int    `json:"metric_target_value"`
	} `json:"metrics"`
}

View File

@ -33,9 +33,6 @@ import (
utilhttp "github.com/goodrain/rainbond/util/http"
)
// Tasks returns a TaskInterface client rooted at the /v2/tasks API prefix.
func (r *regionImpl) Tasks() TaskInterface {
	return &task{regionImpl: *r, prefix: "/v2/tasks"}
}
func (r *regionImpl) Nodes() NodeInterface {
return &node{regionImpl: *r, prefix: "/v2/nodes"}
}
@ -166,48 +163,54 @@ func (nl *nodeLabelImpl) List() (map[string]string, *util.APIHandleError) {
var res utilhttp.ResponseBody
res.Bean = &decode
code, err := nl.nodeImpl.DoRequest(nl.nodeImpl.prefix+"/"+nl.NodeID+"/labels", "GET", nil, &res)
if err != nil || code != 200 {
if err != nil {
return nil, util.CreateAPIHandleError(code, err)
}
return decode, nil
return decode, handleAPIResult(code, res)
}
func (nl *nodeLabelImpl) Delete(k string) *util.APIHandleError {
var decode map[string]string
var res utilhttp.ResponseBody
res.Bean = &decode
code, err := nl.nodeImpl.DoRequest(nl.nodeImpl.prefix+"/"+nl.NodeID+"/labels", "GET", nil, &res)
if err != nil || code != 200 {
if err != nil {
return util.CreateAPIHandleError(code, err)
}
if handleAPIResult(code, res) != nil {
return handleAPIResult(code, res)
}
delete(decode, k)
body, err := json.Marshal(decode)
if err != nil {
return util.CreateAPIHandleError(400, err)
}
code, err = nl.nodeImpl.DoRequest(nl.nodeImpl.prefix+"/"+nl.NodeID+"/labels", "PUT", bytes.NewBuffer(body), &res)
if err != nil || code != 200 {
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
return handleAPIResult(code, res)
}
func (nl *nodeLabelImpl) Add(k, v string) *util.APIHandleError {
var decode map[string]string
var res utilhttp.ResponseBody
res.Bean = &decode
code, err := nl.nodeImpl.DoRequest(nl.nodeImpl.prefix+"/"+nl.NodeID+"/labels", "GET", nil, &res)
if err != nil || code != 200 {
if err != nil {
return util.CreateAPIHandleError(code, err)
}
if handleAPIResult(code, res) != nil {
return handleAPIResult(code, res)
}
decode[k] = v
body, err := json.Marshal(decode)
if err != nil {
return util.CreateAPIHandleError(400, err)
}
code, err = nl.nodeImpl.DoRequest(nl.nodeImpl.prefix+"/"+nl.NodeID+"/labels", "PUT", bytes.NewBuffer(body), &res)
if err != nil || code != 200 {
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
return handleAPIResult(code, res)
}
type nodeConditionImpl struct {
@ -220,67 +223,70 @@ func (nl *nodeConditionImpl) List() ([]client.NodeCondition, *util.APIHandleErro
var res utilhttp.ResponseBody
res.List = &decode
code, err := nl.nodeImpl.DoRequest(nl.nodeImpl.prefix+"/"+nl.NodeID+"/conditions", "GET", nil, &res)
if err != nil || code != 200 {
if err != nil {
return nil, util.CreateAPIHandleError(code, err)
}
return decode, nil
return decode, handleAPIResult(code, res)
}
func (nl *nodeConditionImpl) Delete(k client.NodeConditionType) ([]client.NodeCondition, *util.APIHandleError) {
var decode []client.NodeCondition
var res utilhttp.ResponseBody
res.List = &decode
code, err := nl.nodeImpl.DoRequest(nl.nodeImpl.prefix+"/"+nl.NodeID+"/conditions/"+string(k), "DELETE", nil, &res)
if err != nil || code != 200 {
if err != nil {
return nil, util.CreateAPIHandleError(code, err)
}
return decode, nil
return decode, handleAPIResult(code, res)
}
func (n *node) Delete(nid string) *util.APIHandleError {
code, err := n.DoRequest(n.prefix+"/"+nid, "DELETE", nil, nil)
var res utilhttp.ResponseBody
code, err := n.DoRequest(n.prefix+"/"+nid, "DELETE", nil, &res)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
if code != 200 {
return util.CreateAPIHandleError(code, fmt.Errorf("delete node error"))
}
return nil
return handleAPIResult(code, res)
}
func (n *node) Up(nid string) *util.APIHandleError {
code, err := n.DoRequest(n.prefix+"/"+nid+"/up", "POST", nil, nil)
var res utilhttp.ResponseBody
code, err := n.DoRequest(n.prefix+"/"+nid+"/up", "POST", nil, &res)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
return handleAPIResult(code, res)
}
func (n *node) Down(nid string) *util.APIHandleError {
code, err := n.DoRequest(n.prefix+"/"+nid+"/down", "POST", nil, nil)
var res utilhttp.ResponseBody
code, err := n.DoRequest(n.prefix+"/"+nid+"/down", "POST", nil, &res)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
return handleAPIResult(code, res)
}
func (n *node) UnSchedulable(nid string) *util.APIHandleError {
code, err := n.DoRequest(n.prefix+"/"+nid+"/unschedulable", "PUT", nil, nil)
var res utilhttp.ResponseBody
code, err := n.DoRequest(n.prefix+"/"+nid+"/unschedulable", "PUT", nil, &res)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
return handleAPIResult(code, res)
}
func (n *node) ReSchedulable(nid string) *util.APIHandleError {
code, err := n.DoRequest(n.prefix+"/"+nid+"/reschedulable", "PUT", nil, nil)
var res utilhttp.ResponseBody
code, err := n.DoRequest(n.prefix+"/"+nid+"/reschedulable", "PUT", nil, &res)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
return handleAPIResult(code, res)
}
func (n *node) Install(nid string) *util.APIHandleError {
code, err := n.DoRequest(n.prefix+"/"+nid+"/install", "POST", nil, nil)
var res utilhttp.ResponseBody
code, err := n.DoRequest(n.prefix+"/"+nid+"/install", "POST", nil, &res)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
return handleAPIResult(code, res)
}
type configs struct {
@ -298,10 +304,7 @@ func (c *configs) Get() (*model.GlobalConfig, *util.APIHandleError) {
if err != nil {
return nil, util.CreateAPIHandleError(code, err)
}
if code != 200 {
return nil, util.CreateAPIHandleError(code, fmt.Errorf("Get database center configs code %d", code))
}
return &gc, nil
return &gc, handleAPIResult(code, res)
}
func (c *configs) Put(gc *model.GlobalConfig) *util.APIHandleError {
@ -309,14 +312,12 @@ func (c *configs) Put(gc *model.GlobalConfig) *util.APIHandleError {
if err != nil {
return util.CreateAPIHandleError(400, err)
}
code, err := c.DoRequest(c.prefix+"/datacenter", "PUT", bytes.NewBuffer(rebody), nil)
var res utilhttp.ResponseBody
code, err := c.DoRequest(c.prefix+"/datacenter", "PUT", bytes.NewBuffer(rebody), &res)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
if code != 200 {
return util.CreateAPIHandleError(code, fmt.Errorf("Put database center configs code %d", code))
}
return nil
return handleAPIResult(code, res)
}
//TaskInterface task api
@ -366,83 +367,3 @@ type ConfigsInterface interface {
Get() (*model.GlobalConfig, *util.APIHandleError)
Put(*model.GlobalConfig) *util.APIHandleError
}
//Get fetches the task identified by id from the region API and decodes the
//response bean into a model.Task.
func (t *task) Get(id string) (*model.Task, *util.APIHandleError) {
	var result model.Task
	res := utilhttp.ResponseBody{Bean: &result}
	code, err := t.DoRequest(t.prefix+"/"+id, "GET", nil, &res)
	if err != nil {
		return nil, util.CreateAPIHandleError(code, err)
	}
	if code != 200 {
		return nil, util.CreateAPIHandleError(code, fmt.Errorf("get task with code %d", code))
	}
	return &result, nil
}
//List list all task
func (t *task) List() ([]*model.Task, *util.APIHandleError) {
var res utilhttp.ResponseBody
var gc []*model.Task
res.List = &gc
code, err := t.DoRequest(t.prefix, "GET", nil, &res)
if err != nil {
return nil, util.CreateAPIHandleError(code, err)
}
if code != 200 {
return nil, util.CreateAPIHandleError(code, fmt.Errorf("get task with code %d", code))
}
return gc, nil
}
//Exec 执行任务
func (t *task) Exec(taskID string, nodes []string) *util.APIHandleError {
var nodesBody struct {
Nodes []string `json:"nodes"`
}
nodesBody.Nodes = nodes
body, err := json.Marshal(nodesBody)
if err != nil {
return util.CreateAPIHandleError(400, err)
}
url := t.prefix + "/" + taskID + "/exec"
code, err := t.DoRequest(url, "POST", bytes.NewBuffer(body), nil)
if err != nil {
return util.CreateAPIHandleError(code, err)
}
return nil
}
//Add creates a new task via the region API.
func (t *task) Add(task *model.Task) *util.APIHandleError {
	// The original discarded the marshal error with `_`.
	body, err := json.Marshal(task)
	if err != nil {
		return util.CreateAPIHandleError(400, err)
	}
	code, err := t.DoRequest(t.prefix, "POST", bytes.NewBuffer(body), nil)
	if err != nil {
		return util.CreateAPIHandleError(code, err)
	}
	// Surface non-2xx responses instead of silently reporting success.
	if code >= 300 {
		return util.CreateAPIHandleError(code, fmt.Errorf("add task with code %d", code))
	}
	return nil
}
//AddGroup creates a new task group via the region API.
func (t *task) AddGroup(group *model.TaskGroup) *util.APIHandleError {
	// The original discarded the marshal error with `_`.
	body, err := json.Marshal(group)
	if err != nil {
		return util.CreateAPIHandleError(400, err)
	}
	code, err := t.DoRequest("/v2/taskgroups", "POST", bytes.NewBuffer(body), nil)
	if err != nil {
		return util.CreateAPIHandleError(code, err)
	}
	// Surface non-2xx responses instead of silently reporting success.
	if code >= 300 {
		return util.CreateAPIHandleError(code, fmt.Errorf("add task group with code %d", code))
	}
	return nil
}
//GetTaskStatus returns the per-node status map of the given task.
func (t *task) GetTaskStatus(task string) (map[string]*model.TaskStatus, *util.APIHandleError) {
	statuses := make(map[string]*model.TaskStatus)
	res := utilhttp.ResponseBody{Bean: &statuses}
	code, err := t.DoRequest("/tasks/"+task+"/status", "GET", nil, &res)
	if err != nil {
		return nil, util.CreateAPIHandleError(code, err)
	}
	if code != 200 {
		return nil, util.CreateAPIHandleError(code, fmt.Errorf("get task with code %d", code))
	}
	return statuses, nil
}

View File

@ -47,7 +47,6 @@ var AllTenant string
type Region interface {
Tenants(name string) TenantInterface
Resources() ResourcesInterface
Tasks() TaskInterface
Nodes() NodeInterface
Cluster() ClusterInterface
Configs() ConfigsInterface
@ -70,8 +69,8 @@ type APIConf struct {
type serviceInfo struct {
ServicesAlias string `json:"serviceAlias"`
TenantName string `json:"tenantName"`
ServiceId string `json:"serviceId"`
TenantId string `json:"tenantId"`
ServiceID string `json:"serviceId"`
TenantID string `json:"tenantId"`
}
type podInfo struct {
@ -258,7 +257,10 @@ func (r *resourcesTenant) Get() (*model.TenantResource, *util.APIHandleError) {
// handleAPIResult maps an API response to an error: nil for codes below 300,
// otherwise an APIHandleError carrying the server message and, when present,
// the field-level validation errors.
// Bad-merge residue left an unconditional return as the first statement of
// the error branch, making the validation-error path unreachable; this keeps
// the branching variant.
func handleAPIResult(code int, res utilhttp.ResponseBody) *util.APIHandleError {
	if code >= 300 {
		if len(res.ValidationError) > 0 {
			return util.CreateAPIHandleErrorf(code, "msg:%s \napi validation_error: %+v", res.Msg, res.ValidationError)
		}
		return util.CreateAPIHandleErrorf(code, "msg:%s", res.Msg)
	}
	return nil
}

View File

@ -93,17 +93,17 @@ func (d *netcoreBuild) Build(re *Request) (*Response, error) {
} else {
buildOptions.NoCache = false
}
re.Logger.Info("开始编译源码", map[string]string{"step": "builder-exector"})
re.Logger.Info("start compiling the source code", map[string]string{"step": "builder-exector"})
err := sources.ImageBuild(re.DockerClient, re.SourceDir, buildOptions, re.Logger, 20)
if err != nil {
re.Logger.Error(fmt.Sprintf("构造编译镜像%s失败", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("build image error: %s", err.Error())
return nil, err
}
// check build image exist
_, err = sources.ImageInspectWithRaw(re.DockerClient, d.buildImageName)
if err != nil {
re.Logger.Error(fmt.Sprintf("构造镜像%s失败,请查看Debug日志", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("get image inspect error: %s", err.Error())
return nil, err
}
@ -111,7 +111,7 @@ func (d *netcoreBuild) Build(re *Request) (*Response, error) {
d.buildCacheDir = path.Join(re.CacheDir, re.DeployVersion)
err = d.copyBuildOut(d.buildCacheDir, d.buildImageName)
if err != nil {
re.Logger.Error(fmt.Sprintf("复制编译包失败"), map[string]string{"step": "builder-exector", "status": "failure"})
re.Logger.Error(fmt.Sprintf("copy compilation package failed, find log in rbd-chaos"), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("copy build output file error: %s", err.Error())
return nil, err
}
@ -131,25 +131,25 @@ func (d *netcoreBuild) Build(re *Request) (*Response, error) {
}
err = sources.ImageBuild(re.DockerClient, d.buildCacheDir, runbuildOptions, re.Logger, 20)
if err != nil {
re.Logger.Error(fmt.Sprintf("构造应用镜像%s失败", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("build image error: %s", err.Error())
return nil, err
}
// check build image exist
_, err = sources.ImageInspectWithRaw(re.DockerClient, d.imageName)
if err != nil {
re.Logger.Error(fmt.Sprintf("构造镜像%s失败,请查看Debug日志", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("get image inspect error: %s", err.Error())
return nil, err
}
re.Logger.Info("镜像构建成功,开始推送镜像至仓库", map[string]string{"step": "builder-exector"})
re.Logger.Info("build image success, start to push local image registry", map[string]string{"step": "builder-exector"})
err = sources.ImagePush(re.DockerClient, d.imageName, builder.REGISTRYUSER, builder.REGISTRYPASS, re.Logger, 5)
if err != nil {
re.Logger.Error("推送镜像失败", map[string]string{"step": "builder-exector"})
re.Logger.Error("push image to local image registry faliure, find log in rbd-chaos", map[string]string{"step": "builder-exector"})
logrus.Errorf("push image error: %s", err.Error())
return nil, err
}
re.Logger.Info("镜像推送镜像至仓库成功", map[string]string{"step": "builder-exector"})
re.Logger.Info("push image to push local image registry success", map[string]string{"step": "builder-exector"})
if err := sources.ImageRemove(re.DockerClient, d.imageName); err != nil {
logrus.Errorf("remove image %s failure %s", d.imageName, err.Error())
}

78
builder/cloudos/alioss.go Normal file
View File

@ -0,0 +1,78 @@
package cloudos
import (
"fmt"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
)
// aliOSS implements CloudOSer on top of the Aliyun OSS SDK client,
// scoped to the bucket named in the embedded Config.
type aliOSS struct {
	*oss.Client
	*Config
}
// newAliOSS builds a CloudOSer backed by Aliyun OSS using the endpoint
// and credentials from cfg. No network call is made here; oss.New only
// validates and stores the connection parameters — TODO confirm.
func newAliOSS(cfg *Config) (CloudOSer, error) {
	client, err := oss.New(cfg.Endpoint, cfg.AccessKey, cfg.SecretKey)
	if err != nil {
		return nil, err
	}
	return &aliOSS{Client: client, Config: cfg}, nil
}
// PutObject uploads the file at filepath to the configured bucket under
// the key okey. Service errors from the bucket-existence probe are
// converted to the provider-agnostic S3SDKError.
func (a *aliOSS) PutObject(okey, filepath string) error {
	// Verify the bucket exists and we have permission to access it.
	if _, err := a.GetBucketStat(a.BucketName); err != nil {
		if svcErr, ok := err.(oss.ServiceError); ok {
			return svcErrToS3SDKError(svcErr)
		}
		return err
	}
	bucket, err := a.Bucket(a.BucketName)
	if err != nil {
		return fmt.Errorf("failed to gets the bucket instance: %v", err)
	}
	if err := bucket.PutObjectFromFile(okey, filepath); err != nil {
		return fmt.Errorf("failed to put object: %v", err)
	}
	// The original ended with `return err`, returning a stale (nil) err;
	// make success explicit.
	return nil
}
// GetObject downloads objectKey from the configured bucket into the
// local file filePath. Service errors are converted to S3SDKError.
func (a *aliOSS) GetObject(objectKey, filePath string) error {
	bucket, err := a.Bucket(a.BucketName)
	if err != nil {
		return fmt.Errorf("failed to gets the bucket instance: %v", err)
	}
	if err := bucket.GetObjectToFile(objectKey, filePath); err != nil {
		if svcErr, ok := err.(oss.ServiceError); ok {
			return svcErrToS3SDKError(svcErr)
		}
		return err
	}
	return nil
}
// DeleteObject removes the object objkey from the configured bucket.
func (a *aliOSS) DeleteObject(objkey string) error {
	bucket, err := a.Bucket(a.BucketName)
	if err != nil {
		return fmt.Errorf("failed to gets the bucket instance: %v", err)
	}
	return bucket.DeleteObject(objkey)
}
// svcErrToS3SDKError converts an Aliyun oss.ServiceError into the
// provider-agnostic S3SDKError used across this package.
func svcErrToS3SDKError(svcErr oss.ServiceError) S3SDKError {
	return S3SDKError{
		Code:       svcErr.Code,
		Message:    svcErr.Message,
		RawMessage: svcErr.RawMessage,
		StatusCode: svcErr.StatusCode,
	}
}

View File

@ -0,0 +1,23 @@
package cloudos
import (
"testing"
)
// TestAliOssDeleteObject is an integration test that deletes a fixed
// object from an Aliyun OSS bucket. The endpoint and credentials are
// placeholders ("dummy"), so it can only pass when run with real values.
func TestAliOssDeleteObject(t *testing.T) {
	cfg := &Config{
		ProviderType: S3ProviderAliOSS,
		Endpoint:     "dummy",
		AccessKey:    "dummy",
		SecretKey:    "dummy",
		BucketName:   "hrhtest",
	}
	cr, err := newAliOSS(cfg)
	if err != nil {
		t.Fatalf("create alioss: %v", err)
	}
	if err := cr.DeleteObject("ca932c3215ec4d3891c30799e9aaacba_20191024205031.zip"); err != nil {
		t.Error(err)
	}
}

View File

@ -0,0 +1,68 @@
package cloudos
import (
"errors"
)
var (
	// ErrUnsupportedS3Provider is returned for provider names that do not
	// match any supported S3-compatible backend.
	ErrUnsupportedS3Provider = errors.New("unsupported s3 provider")
)

// S3Provider identifies an S3-compatible object-storage backend.
type S3Provider string

var (
	// S3ProviderS3 selects the generic AWS-S3-compatible driver.
	S3ProviderS3 S3Provider = "s3"
	// S3ProviderAliOSS selects the Aliyun OSS driver.
	S3ProviderAliOSS S3Provider = "alioss"
)

// String returns the provider name as a plain string.
func (p S3Provider) String() string {
	return string(p)
}

// Str2S3Provider converts a string to S3Provider.
// Unknown names yield ErrUnsupportedS3Provider.
func Str2S3Provider(value string) (S3Provider, error) {
	for _, known := range []S3Provider{S3ProviderS3, S3ProviderAliOSS} {
		if value == known.String() {
			return known, nil
		}
	}
	return "", ErrUnsupportedS3Provider
}
// CloudOSer is the interface that wraps the required methods to interact
// with cloud object storage: upload, download and delete of single objects.
type CloudOSer interface {
	PutObject(objkey, filepath string) error
	GetObject(objectKey, filePath string) error
	DeleteObject(objkey string) error
}
// New returns a CloudOSer for the backend named in cfg.ProviderType,
// or ErrUnsupportedS3Provider when the provider is unknown.
func New(cfg *Config) (CloudOSer, error) {
	switch cfg.ProviderType {
	case S3ProviderAliOSS:
		return newAliOSS(cfg)
	case S3ProviderS3:
		return newS3(cfg)
	default:
		return nil, ErrUnsupportedS3Provider
	}
}
// Config configuration about cloud object storage.
type Config struct {
	ProviderType S3Provider // which backend driver to use (s3 or alioss)
	Endpoint     string     // service endpoint of the object store
	AccessKey    string     // access key id
	SecretKey    string     // secret access key
	UseSSL       bool       // request https transport where the driver supports it
	BucketName   string     // target bucket for all operations
	Location     string     // bucket region/location; unused by the visible drivers — TODO confirm
}

View File

@ -0,0 +1,136 @@
package cloudos
import (
"os"
"reflect"
"testing"
"path/filepath"
)
// Integration-test endpoint and credentials; "dummy" placeholders that must
// be replaced with real values for these tests to reach Aliyun OSS.
var endpoint = "dummy"
var accessKeyID = "dummy"
var secretAccessKey = "dummy"
// TestFileUpload is a table-driven integration test for PutObject against
// Aliyun OSS. It expects a 404 service error for a missing bucket and a
// clean upload otherwise. The shared endpoint/credentials are placeholders,
// so it cannot pass as-is.
func TestFileUpload(t *testing.T) {
	tests := []struct {
		name, bucketName, objkey, filepath string
		providerType                       S3Provider
		expErr                             bool
		statusCode                         int
	}{
		{
			name:         "bucket not found",
			providerType: S3ProviderAliOSS,
			bucketName:   "no-bucket",
			expErr:       true,
			statusCode:   404,
		},
		{
			name:         "ok",
			providerType: S3ProviderAliOSS,
			bucketName:   "hrhtest",
			expErr:       false,
			statusCode:   200,
			objkey:       "goodrain-logo.png",
			filepath:     "goodrain-logo.png",
		},
	}
	for idx := range tests {
		tc := tests[idx]
		t.Run(tc.name, func(t *testing.T) {
			cfg := &Config{
				ProviderType: tc.providerType,
				Endpoint:     endpoint,
				AccessKey:    accessKeyID,
				SecretKey:    secretAccessKey,
				BucketName:   tc.bucketName,
			}
			cloudoser, err := New(cfg)
			if err != nil {
				t.Errorf("error create cloudoser: %v", err)
				return
			}
			// NOTE(review): tc.objkey and tc.filepath are declared in the
			// table but this hard-coded path is uploaded instead — confirm
			// whether the table fields were meant to be used here.
			dir := "/tmp/groupbackup/0d65c6608729438aad0a94f6317c80d0_20191024180024.zip"
			_, filename := filepath.Split(dir)
			if err := cloudoser.PutObject(filename, dir); err != nil {
				s3err, ok := err.(S3SDKError)
				if !ok {
					t.Errorf("Expected 'S3SDKError' for err, but returned %v: %v", reflect.TypeOf(s3err), err)
					return
				}
				if s3err.StatusCode != tc.statusCode {
					t.Errorf("Expected %d for status code, but returned %d", tc.statusCode, s3err.StatusCode)
				}
			}
		})
	}
}
// TestGetObject is a table-driven integration test for GetObject against
// Aliyun OSS: a present object downloads cleanly and is removed afterwards,
// a missing key yields a 404 S3SDKError. The shared endpoint/credentials
// are placeholders, so it cannot pass as-is.
func TestGetObject(t *testing.T) {
	tests := []struct {
		name, bucketName, objkey, filepath string
		providerType                       S3Provider
		expErr                             bool
		statusCode                         int
	}{
		{
			name:         "ok",
			providerType: S3ProviderAliOSS,
			bucketName:   "hrhtest",
			expErr:       false,
			statusCode:   200,
			objkey:       "goodrain-logo.png",
			filepath:     "/tmp/goodrain-logo.png",
		},
		{
			name:         "object not found",
			providerType: S3ProviderAliOSS,
			bucketName:   "hrhtest",
			expErr:       true,
			statusCode:   404,
			objkey:       "dummy-object-key",
			filepath:     "/tmp/dummy-object-key",
		},
	}
	for idx := range tests {
		tc := tests[idx]
		t.Run(tc.name, func(t *testing.T) {
			cfg := &Config{
				ProviderType: tc.providerType,
				Endpoint:     endpoint,
				AccessKey:    accessKeyID,
				SecretKey:    secretAccessKey,
				BucketName:   tc.bucketName,
			}
			cloudoser, err := New(cfg)
			if err != nil {
				t.Errorf("error create cloudoser: %v", err)
				return
			}
			if err := cloudoser.GetObject(tc.objkey, tc.filepath); err != nil {
				s3err, ok := err.(S3SDKError)
				if !ok {
					t.Errorf("Expected 'S3SDKError' for err, but returned %v", reflect.TypeOf(s3err))
					return
				}
				if s3err.StatusCode != tc.statusCode {
					t.Errorf("Expected %d for status code, but returned %d", tc.statusCode, s3err.StatusCode)
				}
				return
			}
			// clean up the downloaded file on success
			err = os.Remove(tc.filepath)
			if err != nil {
				t.Errorf("failed to remove file: %v", err)
			}
		})
	}
}
// TestTypeConvert only demonstrates converting a raw string to S3Provider;
// it logs the value and asserts nothing.
func TestTypeConvert(t *testing.T) {
	foo := S3Provider("Minio1")
	t.Log(foo)
}

17
builder/cloudos/errors.go Normal file
View File

@ -0,0 +1,17 @@
package cloudos
import "fmt"
// S3SDKError is the provider-agnostic error type the cloudos drivers
// return for failed object-storage service calls.
type S3SDKError struct {
	Code       string // The error code returned from S3 to the caller
	Message    string // The detail error message from S3
	RawMessage string // The raw messages from S3
	StatusCode int    // HTTP status code
}

// Error implements interface error
func (e S3SDKError) Error() string {
	return fmt.Sprintf("s3: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\"",
		e.StatusCode, e.Code, e.Message)
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

78
builder/cloudos/s3.go Normal file
View File

@ -0,0 +1,78 @@
package cloudos
import (
"io/ioutil"
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
// s3Driver implements CloudOSer with the AWS S3 SDK; path-style
// addressing is forced in newS3, so it works with S3-compatible
// gateways as well as AWS itself.
type s3Driver struct {
	s3 *s3.S3
	*Config
}
// newS3 builds a CloudOSer backed by any AWS-S3-compatible service.
// The region is fixed to us-east-1 and path-style addressing is forced,
// which S3-compatible gateways accept.
func newS3(cfg *Config) (CloudOSer, error) {
	s3Config := &aws.Config{
		Credentials: credentials.NewStaticCredentials(cfg.AccessKey, cfg.SecretKey, ""),
		Endpoint:    aws.String(cfg.Endpoint),
		Region:      aws.String("us-east-1"),
		// Honor cfg.UseSSL; the original hard-coded DisableSSL to true,
		// silently ignoring the flag. The default (UseSSL=false) keeps
		// the previous plain-HTTP behavior.
		DisableSSL:       aws.Bool(!cfg.UseSSL),
		S3ForcePathStyle: aws.Bool(true),
	}
	// session.New is deprecated and panics on config errors; NewSession
	// reports them instead.
	sess, err := session.NewSession(s3Config)
	if err != nil {
		return nil, err
	}
	s3Driver := s3Driver{
		s3:     s3.New(sess),
		Config: cfg,
	}
	return &s3Driver, nil
}
// PutObject uploads the file at filepath to the configured bucket as objkey.
func (s *s3Driver) PutObject(objkey, filepath string) error {
	src, err := os.Open(filepath)
	if err != nil {
		return err
	}
	defer src.Close()
	input := &s3.PutObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(objkey),
		Body:   src,
	}
	_, err = s.s3.PutObject(input)
	return err
}
// GetObject downloads objkey from the configured bucket and writes it to
// filePath (created with mode 0777 before umask). The whole object is
// buffered in memory before being written out.
func (s *s3Driver) GetObject(objkey, filePath string) error {
	resp, err := s.s3.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(objkey),
		Range:  aws.String("bytes=" + strconv.FormatInt(0, 10) + "-"),
	})
	if err != nil {
		return err
	}
	// The original leaked the response body; it must be closed so the
	// underlying HTTP connection can be reused.
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// Propagate write failures instead of discarding them.
	return ioutil.WriteFile(filePath, b, os.ModePerm)
}
// DeleteObject removes the object objkey from the configured bucket.
func (s *s3Driver) DeleteObject(objkey string) error {
	_, err := s.s3.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(s.BucketName),
		Key:    aws.String(objkey),
	})
	return err
}

View File

@ -0,0 +1,59 @@
package cloudos
import (
"testing"
)
// TestS3PutObject is an integration test: it uploads a local file through
// the s3 driver to a fixed endpoint. Credentials and local paths are
// developer-machine specific, so it cannot pass as-is.
func TestS3PutObject(t *testing.T) {
	cfg := &Config{
		Endpoint:   "9000.gr4433a2.52bli69h.0196bd.grapps.cn",
		AccessKey:  "dummy",
		SecretKey:  "dummy",
		BucketName: "my-bucket",
	}
	cs, err := newS3(cfg)
	if err != nil {
		t.Fatalf("create s3 driver: %v", err)
	}
	if err := cs.PutObject("aws-sdk-go-1.25.25.zip", "/Users/abewang/Downloads/aws-sdk-go-1.25.25.zip"); err != nil {
		t.Error(err)
	}
}

// TestS3GetObject is an integration test: it downloads an object through
// the s3 driver from a fixed endpoint. Credentials are placeholders.
func TestS3GetObject(t *testing.T) {
	cfg := &Config{
		Endpoint:   "9000.gr4433a2.52bli69h.0196bd.grapps.cn",
		AccessKey:  "access_key",
		SecretKey:  "dummy",
		BucketName: "my-bucket",
	}
	cs, err := newS3(cfg)
	if err != nil {
		t.Fatalf("create s3 driver: %v", err)
	}
	if err := cs.GetObject("goodrain-logo.png", "goodrain-logo2.png"); err != nil {
		t.Error(err)
	}
}

// TestS3DeleteObject is an integration test: it deletes an object through
// the s3 driver at a fixed endpoint. Credentials are placeholders.
func TestS3DeleteObject(t *testing.T) {
	cfg := &Config{
		Endpoint:   "9000.gr4433a2.52bli69h.0196bd.grapps.cn",
		AccessKey:  "access_key",
		SecretKey:  "dummy",
		BucketName: "my-bucket",
	}
	cs, err := newS3(cfg)
	if err != nil {
		t.Fatalf("create s3 driver: %v", err)
	}
	if err := cs.DeleteObject("goodrain-logo.png"); err != nil {
		t.Error(err)
	}
}

View File

@ -19,6 +19,7 @@
package exector
import (
"context"
"fmt"
"os"
"path"
@ -196,7 +197,11 @@ func (i *SourceCodeBuildItem) Run(timeout time.Duration) error {
res, err := i.codeBuild()
if err != nil {
i.Logger.Error("Build app version from source code failure,"+err.Error(), map[string]string{"step": "builder-exector", "status": "failure"})
if err.Error() == context.DeadlineExceeded.Error() {
i.Logger.Error("Build app version from source code timeout, the maximum time is 30 minutes", map[string]string{"step": "builder-exector", "status": "failure"})
} else {
i.Logger.Error("Build app version from source code failure,"+err.Error(), map[string]string{"step": "builder-exector", "status": "failure"})
}
return err
}
if err := i.UpdateBuildVersionInfo(res); err != nil {

View File

@ -72,7 +72,12 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
if err != nil {
return nil, err
}
maxConcurrentTask := runtime.NumCPU() * 2
var maxConcurrentTask int
if conf.MaxTasks == 0 {
maxConcurrentTask = runtime.NumCPU() * 2
} else {
maxConcurrentTask = conf.MaxTasks
}
ctx, cancel := context.WithCancel(context.Background())
logrus.Infof("The maximum number of concurrent build tasks supported by the current node is %d", maxConcurrentTask)
return &exectorManager{

View File

@ -35,6 +35,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder/cloudos"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
"github.com/pquerna/ffjson/ffjson"
@ -53,32 +54,26 @@ var maxBackupVersionSize = 3
//BackupAPPNew backup group app new version
type BackupAPPNew struct {
GroupID string `json:"group_id" `
ServiceIDs []string `json:"service_ids" `
//full-online,full-offline
Mode string `json:"mode"`
Version string `json:"version"`
EventID string
SlugInfo struct {
Namespace string `json:"namespace"`
FTPHost string `json:"ftp_host"`
FTPPort string `json:"ftp_port"`
FTPUser string `json:"ftp_username"`
FTPPassword string `json:"ftp_password"`
} `json:"slug_info"`
ImageInfo struct {
HubURL string `json:"hub_url"`
HubUser string `json:"hub_user"`
HubPassword string `json:"hub_password"`
Namespace string `json:"namespace"`
IsTrust bool `json:"is_trust,omitempty"`
} `json:"image_info,omitempty"`
GroupID string `json:"group_id" `
ServiceIDs []string `json:"service_ids" `
Version string `json:"version"`
EventID string
SourceDir string `json:"source_dir"`
SourceType string `json:"source_type"`
BackupID string `json:"backup_id"`
BackupSize int64
Logger event.Logger
DockerClient *client.Client
//full-online,full-offline
Mode string `json:"mode"`
S3Config struct {
Provider string `json:"provider"`
Endpoint string `json:"endpoint"`
AccessKey string `json:"access_key"`
SecretKey string `json:"secret_key"`
BucketName string `json:"bucket_name"`
} `json:"s3_config"`
}
func init() {
@ -175,34 +170,54 @@ func (b *BackupAPPNew) Run(timeout time.Duration) error {
return err
}
b.BackupSize += util.GetFileSize(fmt.Sprintf("%s.zip", b.SourceDir))
os.RemoveAll(b.SourceDir)
b.SourceDir = fmt.Sprintf("%s.zip", b.SourceDir)
//upload app backup data to online server(sftp) if mode is full-online
if b.Mode == "full-online" && b.SlugInfo.FTPHost != "" && b.SlugInfo.FTPPort != "" {
b.Logger.Info(fmt.Sprintf("Start uploading backup metadata to the cloud"), map[string]string{"step": "backup_builder", "status": "starting"})
sFTPClient, err := sources.NewSFTPClient(b.SlugInfo.FTPUser, b.SlugInfo.FTPPassword, b.SlugInfo.FTPHost, b.SlugInfo.FTPPort)
if err != nil {
b.Logger.Error(util.Translation("create ftp client error"), map[string]string{"step": "backup_builder", "status": "failure"})
return err
}
defer sFTPClient.Close()
dstDir := fmt.Sprintf("%s/backup/%s_%s/metadata_data.zip", b.SlugInfo.Namespace, b.GroupID, b.Version)
if err := sFTPClient.PushFile(b.SourceDir, dstDir, b.Logger); err != nil {
b.Logger.Error(util.Translation("push slug file to sftp server error"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("push slug file error when backup app , %s", err.Error())
return err
}
//Statistical backup size
os.Remove(b.SourceDir)
b.SourceDir = dstDir
b.SourceType = "sftp"
if err := os.RemoveAll(b.SourceDir); err != nil {
logrus.Warningf("error removing temporary direcotry: %v", err)
}
b.SourceDir = fmt.Sprintf("%s.zip", b.SourceDir)
if err := b.uploadPkg(); err != nil {
return fmt.Errorf("error upload backup package: %v", err)
}
if err := b.updateBackupStatu("success"); err != nil {
return err
}
return nil
}
// uploadPkg uploads the zipped backup package at b.SourceDir to the
// S3-compatible store described by b.S3Config. It is a no-op unless the
// backup mode is "full-online". The local package file is removed on
// return, whether or not the upload succeeded.
func (b *BackupAPPNew) uploadPkg() error {
	if b.Mode != "full-online" {
		return nil
	}
	// Registered only on the upload path, so offline backups keep their file.
	defer func() {
		if err := os.Remove(b.SourceDir); err != nil {
			logrus.Warningf("error removing temporary file: %v", err)
		}
	}()
	s3Provider, err := cloudos.Str2S3Provider(b.S3Config.Provider)
	if err != nil {
		return err
	}
	cfg := &cloudos.Config{
		ProviderType: s3Provider,
		Endpoint:     b.S3Config.Endpoint,
		AccessKey:    b.S3Config.AccessKey,
		SecretKey:    b.S3Config.SecretKey,
		BucketName:   b.S3Config.BucketName,
	}
	cloudoser, err := cloudos.New(cfg)
	if err != nil {
		return fmt.Errorf("error creating cloudoser: %v", err)
	}
	// The object key is simply the package's file name.
	_, filename := filepath.Split(b.SourceDir)
	if err := cloudoser.PutObject(filename, b.SourceDir); err != nil {
		return fmt.Errorf("object key: %s; filepath: %s; error putting object: %v", filename, b.SourceDir, err)
	}
	return nil
}
// judging whether the metadata structure is old or new, the new version is v5.1.8 and later
func judgeMetadataVersion(metadata []byte) (string, error) {
var appSnapshot AppSnapshot
@ -232,7 +247,7 @@ func (b *BackupAPPNew) backupServiceInfo(serviceInfos []*RegionServiceSnapshot)
version.FinalStatus = "lost"
continue
}
if err := b.uploadSlug(app, version); err != nil {
if err := b.saveSlugPkg(app, version); err != nil {
logrus.Errorf("upload app %s version %s slug file error.%s", app.Service.ServiceName, version.BuildVersion, err.Error())
} else {
backupVersionSize++
@ -243,7 +258,7 @@ func (b *BackupAPPNew) backupServiceInfo(serviceInfos []*RegionServiceSnapshot)
version.FinalStatus = "lost"
continue
}
if err := b.uploadImage(app, version); err != nil {
if err := b.saveImagePkg(app, version); err != nil {
logrus.Errorf("upload app %s version %s image error.%s", app.Service.ServiceName, version.BuildVersion, err.Error())
} else {
backupVersionSize++
@ -254,6 +269,7 @@ func (b *BackupAPPNew) backupServiceInfo(serviceInfos []*RegionServiceSnapshot)
b.Logger.Error(fmt.Sprintf("Application(%s) Backup build version failure.", app.Service.ServiceAlias), map[string]string{"step": "backup_builder", "status": "success"})
return fmt.Errorf("Application(%s) Backup build version failure", app.Service.ServiceAlias)
}
logrus.Infof("backup app %s %d version", app.Service.ServiceName, backupVersionSize)
b.Logger.Info(fmt.Sprintf("Complete backup application (%s) runtime %d version", app.Service.ServiceAlias, backupVersionSize), map[string]string{"step": "backup_builder", "status": "success"})
b.Logger.Info(fmt.Sprintf("Start backup application(%s) persistent data", app.Service.ServiceAlias), map[string]string{"step": "backup_builder", "status": "starting"})
@ -332,72 +348,31 @@ func (b *BackupAPPNew) checkVersionExist(version *dbmodel.VersionInfo) (bool, er
}
return false, fmt.Errorf("delivered type is invalid")
}
func (b *BackupAPPNew) uploadSlug(app *RegionServiceSnapshot, version *dbmodel.VersionInfo) error {
if b.Mode == "full-online" && b.SlugInfo.FTPHost != "" && b.SlugInfo.FTPPort != "" {
sFTPClient, err := sources.NewSFTPClient(b.SlugInfo.FTPUser, b.SlugInfo.FTPPassword, b.SlugInfo.FTPHost, b.SlugInfo.FTPPort)
if err != nil {
b.Logger.Error(util.Translation("create ftp client error"), map[string]string{"step": "backup_builder", "status": "failure"})
return err
}
defer sFTPClient.Close()
dstDir := fmt.Sprintf("%s/backup/%s_%s/app_%s/%s.tgz", b.SlugInfo.Namespace, b.GroupID, b.Version, app.ServiceID, version.BuildVersion)
if err := sFTPClient.PushFile(version.DeliveredPath, dstDir, b.Logger); err != nil {
b.Logger.Error(util.Translation("push slug file to sftp server error"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("push slug file error when backup app , %s", err.Error())
return err
}
//Statistical backup size
b.BackupSize += util.GetFileSize(version.DeliveredPath)
} else {
dstDir := fmt.Sprintf("%s/app_%s/slug_%s.tgz", b.SourceDir, app.ServiceID, version.BuildVersion)
util.CheckAndCreateDir(filepath.Dir(dstDir))
if err := sources.CopyFileWithProgress(version.DeliveredPath, dstDir, b.Logger); err != nil {
b.Logger.Error(util.Translation("push slug file to local dir error"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("copy slug file error when backup app, %s", err.Error())
return err
}
// saveSlugPkg copies a build version's slug archive (version.DeliveredPath)
// into the backup source directory as app_<serviceID>/slug_<buildVersion>.tgz.
func (b *BackupAPPNew) saveSlugPkg(app *RegionServiceSnapshot, version *dbmodel.VersionInfo) error {
	dstDir := fmt.Sprintf("%s/app_%s/slug_%s.tgz", b.SourceDir, app.ServiceID, version.BuildVersion)
	util.CheckAndCreateDir(filepath.Dir(dstDir))
	if err := sources.CopyFileWithProgress(version.DeliveredPath, dstDir, b.Logger); err != nil {
		b.Logger.Error(util.Translation("push slug file to local dir error"), map[string]string{"step": "backup_builder", "status": "failure"})
		logrus.Errorf("copy slug file error when backup app, %s", err.Error())
		return err
	}
	return nil
}
func (b *BackupAPPNew) uploadImage(app *RegionServiceSnapshot, version *dbmodel.VersionInfo) error {
if b.Mode == "full-online" && b.ImageInfo.HubURL != "" {
backupImage, err := version.CreateShareImage(b.ImageInfo.HubURL, b.ImageInfo.Namespace, fmt.Sprintf("%s_backup", b.Version))
if err != nil {
return fmt.Errorf("create backup image error %s", err)
}
info, err := sources.ImagePull(b.DockerClient, version.DeliveredPath, "", "", b.Logger, 10)
if err != nil {
return fmt.Errorf("pull image when backup error %s", err)
}
if err := sources.ImageTag(b.DockerClient, version.DeliveredPath, backupImage, b.Logger, 1); err != nil {
return fmt.Errorf("change image tag when backup error %s", err)
}
if b.ImageInfo.IsTrust {
if err := sources.TrustedImagePush(b.DockerClient, backupImage, b.ImageInfo.HubUser, b.ImageInfo.HubPassword, b.Logger, 10); err != nil {
b.Logger.Error(util.Translation("save image to hub error"), map[string]string{"step": "backup_builder", "status": "failure"})
return fmt.Errorf("backup image push error %s", err)
}
} else {
if err := sources.ImagePush(b.DockerClient, backupImage, b.ImageInfo.HubUser, b.ImageInfo.HubPassword, b.Logger, 10); err != nil {
b.Logger.Error(util.Translation("save image to hub error"), map[string]string{"step": "backup_builder", "status": "failure"})
return fmt.Errorf("backup image push error %s", err)
}
}
b.BackupSize += info.Size
} else {
dstDir := fmt.Sprintf("%s/app_%s/image_%s.tar", b.SourceDir, app.ServiceID, version.BuildVersion)
util.CheckAndCreateDir(filepath.Dir(dstDir))
if _, err := sources.ImagePull(b.DockerClient, version.DeliveredPath, "", "", b.Logger, 20); err != nil {
b.Logger.Error(util.Translation("error pulling image"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf(fmt.Sprintf("image: %s; error pulling image: %v", version.DeliveredPath, err), version.DeliveredPath, err.Error())
}
if err := sources.ImageSave(b.DockerClient, version.DeliveredPath, dstDir, b.Logger); err != nil {
b.Logger.Error(util.Translation("save image to local dir error"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("save image(%s) to local dir error when backup app, %s", version.DeliveredPath, err.Error())
return err
}
// saveImagePkg pulls a build version's image and saves it into the backup
// source directory as app_<serviceID>/image_<buildVersion>.tar. A pull
// failure is only logged (preserving the original best-effort behavior);
// the subsequent ImageSave decides whether the backup of this version fails.
func (b *BackupAPPNew) saveImagePkg(app *RegionServiceSnapshot, version *dbmodel.VersionInfo) error {
	dstDir := fmt.Sprintf("%s/app_%s/image_%s.tar", b.SourceDir, app.ServiceID, version.BuildVersion)
	util.CheckAndCreateDir(filepath.Dir(dstDir))
	if _, err := sources.ImagePull(b.DockerClient, version.DeliveredPath, "", "", b.Logger, 20); err != nil {
		b.Logger.Error(util.Translation("error pulling image"), map[string]string{"step": "backup_builder", "status": "failure"})
		// The original passed a pre-formatted string plus extra args to
		// Errorf (no verbs for them), producing "%!(EXTRA ...)" log noise.
		logrus.Errorf("image: %s; error pulling image: %v", version.DeliveredPath, err)
	}
	if err := sources.ImageSave(b.DockerClient, version.DeliveredPath, dstDir, b.Logger); err != nil {
		b.Logger.Error(util.Translation("save image to local dir error"), map[string]string{"step": "backup_builder", "status": "failure"})
		logrus.Errorf("save image(%s) to local dir error when backup app, %s", version.DeliveredPath, err.Error())
		return err
	}
	return nil
}

View File

@ -0,0 +1,61 @@
// Copyright (C) 2014-2018 Goodrain Co., Ltd.
// RAINBOND, Application Management Platform
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package exector
import (
"fmt"
"github.com/goodrain/rainbond/util"
"testing"
)
// TestUploadPkg is an integration test that uploads a local backup package
// to an S3-compatible store. Credentials are placeholders.
// NOTE(review): Provider is set to "AliyunOSS", but Str2S3Provider accepts
// "alioss" — as written, uploadPkg should return ErrUnsupportedS3Provider;
// confirm whether this test is meant to exercise that error path.
func TestUploadPkg(t *testing.T) {
	b := &BackupAPPNew{
		SourceDir: "/tmp/groupbackup/0d65c6608729438aad0a94f6317c80d0_20191024180024.zip",
		Mode:      "full-online",
	}
	b.S3Config.Provider = "AliyunOSS"
	b.S3Config.Endpoint = "dummy"
	b.S3Config.AccessKey = "dummy"
	b.S3Config.SecretKey = "dummy"
	b.S3Config.BucketName = "dummy"
	if err := b.uploadPkg(); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
// TestUploadPkg2 is an integration test that downloads a backup package
// from an S3-compatible store into a freshly created cache directory via
// BackupAPPRestore.downloadFromS3. Credentials are placeholders, so it can
// only pass when run with real ones.
func TestUploadPkg2(t *testing.T) {
	b := &BackupAPPRestore{}
	b.S3Config.Provider = "alioss"
	b.S3Config.Endpoint = "dummy"
	b.S3Config.AccessKey = "dummy"
	b.S3Config.SecretKey = "dummy"
	b.S3Config.BucketName = "hrhtest"
	cacheDir := fmt.Sprintf("/tmp/cache/tmp/%s/%s", "c6b05a2a6d664fda83dab8d3bcf1a941", util.NewUUID())
	if err := util.CheckAndCreateDir(cacheDir); err != nil {
		t.Errorf("create cache dir error %s", err.Error())
	}
	b.cacheDir = cacheDir
	sourceDir := "/tmp/groupbackup/c6b05a2a6d664fda83dab8d3bcf1a941_20191024185643.zip"
	if err := b.downloadFromS3(sourceDir); err != nil {
		t.Error(err)
	}
}

View File

@ -21,16 +21,10 @@ package exector
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/coreos/etcd/clientv3"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/cloudos"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/errors"
@ -39,26 +33,18 @@ import (
"github.com/goodrain/rainbond/util"
"github.com/pquerna/ffjson/ffjson"
"github.com/tidwall/gjson"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
)
//BackupAPPRestore restrore the group app backup
type BackupAPPRestore struct {
//full-online,full-offline
EventID string
SlugInfo struct {
Namespace string `json:"namespace"`
FTPHost string `json:"ftp_host"`
FTPPort string `json:"ftp_port"`
FTPUser string `json:"ftp_username"`
FTPPassword string `json:"ftp_password"`
} `json:"slug_info"`
ImageInfo struct {
HubURL string `json:"hub_url"`
HubUser string `json:"hub_user"`
HubPassword string `json:"hub_password"`
Namespace string `json:"namespace"`
IsTrust bool `json:"is_trust,omitempty"`
} `json:"image_info,omitempty"`
BackupID string `json:"backup_id"`
TenantID string `json:"tenant_id"`
Logger event.Logger
@ -72,6 +58,14 @@ type BackupAPPRestore struct {
//serviceChange key: oldServiceID
serviceChange map[string]*Info
etcdcli *clientv3.Client
S3Config struct {
Provider string `json:"provider"`
Endpoint string `json:"endpoint"`
AccessKey string `json:"access_key"`
SecretKey string `json:"secret_key"`
BucketName string `json:"bucket_name"`
} `json:"s3_config"`
}
//Info service cache info
@ -110,24 +104,28 @@ func (b *BackupAPPRestore) Run(timeout time.Duration) error {
if err != nil {
return err
}
if backup.Status != "success" || backup.SourceDir == "" || backup.SourceType == "" {
if backup.Status != "success" || backup.SourceDir == "" || backup.BackupMode == "" {
return fmt.Errorf("backup can not be restore")
}
cacheDir := fmt.Sprintf("/grdata/cache/tmp/%s/%s", b.BackupID, b.EventID)
cacheDir := fmt.Sprintf("/grdata/cache/tmp/%s/%s", b.BackupID, util.NewUUID())
if err := util.CheckAndCreateDir(cacheDir); err != nil {
return fmt.Errorf("create cache dir error %s", err.Error())
}
b.cacheDir = cacheDir
switch backup.SourceType {
case "sftp":
b.downloadFromFTP(backup)
switch backup.BackupMode {
case "full-online":
if err := b.downloadFromS3(backup.SourceDir); err != nil {
return fmt.Errorf("error downloading file from s3: %v", err)
}
default:
b.downloadFromLocal(backup)
}
//read metadata file
metadata, err := ioutil.ReadFile(fmt.Sprintf("%s/region_apps_metadata.json", b.cacheDir))
metadata, err := ioutil.ReadFile(path.Join(b.cacheDir, "region_apps_metadata.json"))
if err != nil {
return err
return fmt.Errorf("error reading file: %v", err)
}
metaVersion, err := judgeMetadataVersion(metadata)
@ -152,6 +150,7 @@ func (b *BackupAPPRestore) Run(timeout time.Duration) error {
}
b.Logger.Info("读取备份元数据完成", map[string]string{"step": "restore_builder", "status": "running"})
logrus.Infof("backup id: %s; successfully read metadata.", b.BackupID)
//modify the metadata
if err := b.modify(&appSnapshot); err != nil {
return err
@ -161,6 +160,7 @@ func (b *BackupAPPRestore) Run(timeout time.Duration) error {
return err
}
b.Logger.Info("恢复备份元数据完成", map[string]string{"step": "restore_builder", "status": "success"})
logrus.Infof("backup id: %s; successfully restore metadata.", b.BackupID)
//If the following error occurs, delete the data from the database
//restore all app all build version and data
if err := b.restoreVersionAndData(backup, &appSnapshot); err != nil {
@ -169,6 +169,7 @@ func (b *BackupAPPRestore) Run(timeout time.Duration) error {
//save result
b.saveResult("success", "")
logrus.Infof("backup id: %s; successfully restore backup.", b.BackupID)
b.Logger.Info("恢复成功", map[string]string{"step": "restore_builder", "status": "success"})
return nil
}
@ -284,54 +285,21 @@ func (b *BackupAPPRestore) getOldServiceID(new string) string {
return ""
}
func (b *BackupAPPRestore) downloadSlug(backup *dbmodel.AppBackup, app *RegionServiceSnapshot, version *dbmodel.VersionInfo) error {
if backup.BackupMode == "full-online" && b.SlugInfo.FTPHost != "" && b.SlugInfo.FTPPort != "" {
sFTPClient, err := sources.NewSFTPClient(b.SlugInfo.FTPUser, b.SlugInfo.FTPPassword, b.SlugInfo.FTPHost, b.SlugInfo.FTPPort)
if err != nil {
b.Logger.Error(util.Translation("create ftp client error"), map[string]string{"step": "restore_builder", "status": "failure"})
return err
}
defer sFTPClient.Close()
dstDir := fmt.Sprintf("%s/app_%s/%s.tgz", filepath.Dir(backup.SourceDir), b.getOldServiceID(app.ServiceID), version.BuildVersion)
if err := sFTPClient.DownloadFile(dstDir, version.DeliveredPath, b.Logger); err != nil {
b.Logger.Error(util.Translation("down slug file from sftp server error"), map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("down %s slug file error when backup app , %s", dstDir, err.Error())
return err
}
} else {
dstDir := fmt.Sprintf("%s/app_%s/slug_%s.tgz", b.cacheDir, b.getOldServiceID(app.ServiceID), version.BuildVersion)
if err := sources.CopyFileWithProgress(dstDir, version.DeliveredPath, b.Logger); err != nil {
b.Logger.Error(util.Translation("down slug file from local dir error"), map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("copy slug file error when backup app, %s", err.Error())
return err
}
dstDir := fmt.Sprintf("%s/app_%s/slug_%s.tgz", b.cacheDir, b.getOldServiceID(app.ServiceID), version.BuildVersion)
if err := sources.CopyFileWithProgress(dstDir, version.DeliveredPath, b.Logger); err != nil {
b.Logger.Error(util.Translation("down slug file from local dir error"), map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("copy slug file error when backup app, %s", err.Error())
return err
}
return nil
}
func (b *BackupAPPRestore) downloadImage(backup *dbmodel.AppBackup, app *RegionServiceSnapshot, version *dbmodel.VersionInfo) error {
if backup.BackupMode == "full-online" && b.ImageInfo.HubURL != "" {
backupImage, err := version.CreateShareImage(b.ImageInfo.HubURL, b.ImageInfo.Namespace, fmt.Sprintf("%s_backup", backup.Version))
if err != nil {
return fmt.Errorf("create backup image error %s", err)
}
if _, err := sources.ImagePull(b.DockerClient, backupImage, b.ImageInfo.HubUser, b.ImageInfo.HubPassword, b.Logger, 10); err != nil {
b.Logger.Error(util.Translation("pull image from hub error"), map[string]string{"step": "restore_builder", "status": "failure"})
return fmt.Errorf("restore backup image pull error %s", err)
}
if err := sources.ImageTag(b.DockerClient, backupImage, version.DeliveredPath, b.Logger, 1); err != nil {
return fmt.Errorf("change image tag when restore backup error %s", err)
}
err = sources.ImagePush(b.DockerClient, version.DeliveredPath, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 10)
if err != nil {
return fmt.Errorf("push image to local when restore backup error %s", err)
}
} else {
dstDir := fmt.Sprintf("%s/app_%s/image_%s.tar", b.cacheDir, b.getOldServiceID(app.ServiceID), version.BuildVersion)
if err := sources.ImageLoad(b.DockerClient, dstDir, b.Logger); err != nil {
b.Logger.Error(util.Translation("load image to local hub error"), map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("load image to local hub error when restore backup app, %s", err.Error())
return err
}
dstDir := fmt.Sprintf("%s/app_%s/image_%s.tar", b.cacheDir, b.getOldServiceID(app.ServiceID), version.BuildVersion)
if err := sources.ImageLoad(b.DockerClient, dstDir, b.Logger); err != nil {
b.Logger.Error(util.Translation("load image to local hub error"), map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("load image to local hub error when restore backup app, %s", err.Error())
return err
}
return nil
}
@ -606,31 +574,44 @@ func (b *BackupAPPRestore) downloadFromLocal(backup *dbmodel.AppBackup) error {
return nil
}
func (b *BackupAPPRestore) downloadFromFTP(backup *dbmodel.AppBackup) error {
sourceDir := backup.SourceDir
sFTPClient, err := sources.NewSFTPClient(b.SlugInfo.FTPUser, b.SlugInfo.FTPPassword, b.SlugInfo.FTPHost, b.SlugInfo.FTPPort)
func (b *BackupAPPRestore) downloadFromS3(sourceDir string) error {
s3Provider, err := cloudos.Str2S3Provider(b.S3Config.Provider)
if err != nil {
b.Logger.Error(util.Translation("create ftp client error"), map[string]string{"step": "backup_builder", "status": "failure"})
return err
}
defer sFTPClient.Close()
dstDir := fmt.Sprintf("%s/%s", b.cacheDir, filepath.Base(sourceDir))
if err := sFTPClient.DownloadFile(sourceDir, dstDir, b.Logger); err != nil {
b.Logger.Error(util.Translation("down slug file from sftp server error"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("down slug file error when restore backup app , %s", err.Error())
return err
cfg := &cloudos.Config{
ProviderType: s3Provider,
Endpoint: b.S3Config.Endpoint,
AccessKey: b.S3Config.AccessKey,
SecretKey: b.S3Config.SecretKey,
BucketName: b.S3Config.BucketName,
}
err = util.Unzip(dstDir, b.cacheDir)
cloudoser, err := cloudos.New(cfg)
if err != nil {
b.Logger.Error(util.Translation("unzip metadata file error"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("unzip file error when restore backup app , %s", err.Error())
return fmt.Errorf("error creating cloudoser: %v", err)
}
_, objectKey := filepath.Split(sourceDir)
disDir := path.Join(b.cacheDir, objectKey)
logrus.Debugf("object key: %s; file path: %s; start downloading backup file.", objectKey, disDir)
if err := cloudoser.GetObject(objectKey, disDir); err != nil {
return fmt.Errorf("object key: %s; file path: %s; error downloading file for object storage: %v", objectKey, disDir, err)
}
logrus.Debugf("successfully downloading backup file: %s", disDir)
err = util.Unzip(disDir, b.cacheDir)
if err != nil {
// b.Logger.Error(util.Translation("unzip metadata file error"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("error unzipping backup file: %v", err)
return err
}
dirs, err := util.GetDirNameList(b.cacheDir, 1)
if err != nil || len(dirs) < 1 {
b.Logger.Error(util.Translation("unzip metadata file error"), map[string]string{"step": "backup_builder", "status": "failure"})
// b.Logger.Error(util.Translation("unzip metadata file error"), map[string]string{"step": "backup_builder", "status": "failure"})
return fmt.Errorf("find metadata cache dir error after unzip file")
}
b.cacheDir = filepath.Join(b.cacheDir, dirs[0])
return nil
}

View File

@ -19,37 +19,14 @@
package exector
import (
"fmt"
"testing"
"github.com/pquerna/ffjson/ffjson"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/util"
)
// TestDownloadFromLocal prepares a cache directory and runs a backup download.
// NOTE(review): despite its name, this test calls downloadFromFTP, not a
// "local" download path — confirm which code path it was meant to cover.
// It depends on a test logger from the event package and on local /tmp state.
func TestDownloadFromLocal(t *testing.T) {
	var b = BackupAPPRestore{
		BackupID: "test",
		EventID:  "test",
		Logger:   event.GetTestLogger(),
	}
	// Cache directory is derived from the backup/event IDs, so reruns reuse it.
	cacheDir := fmt.Sprintf("/tmp/%s/%s", b.BackupID, b.EventID)
	if err := util.CheckAndCreateDir(cacheDir); err != nil {
		t.Fatal("create cache dir error", err.Error())
	}
	b.cacheDir = cacheDir
	if err := b.downloadFromFTP(&dbmodel.AppBackup{
		EventID:  "test",
		BackupID: "ccc",
	}); err != nil {
		t.Fatal("downloadFromLocal error", err.Error())
	}
	t.Log(b.cacheDir)
}
func TestModify(t *testing.T) {
var b = BackupAPPRestore{
BackupID: "test",

View File

@ -339,9 +339,11 @@ func (i *ImportApp) importPlugins() error {
return image
}
if oldimage, ok := plugin.CheckGet("share_image"); ok {
appKey, _ := plugin.Get("service_key").String()
i.oldPluginPath[appKey], _ = oldimage.String()
pluginID, _ := plugin.Get("plugin_id").String()
i.oldPluginPath[pluginID], _ = oldimage.String()
plugin.Set("share_image", getImageImage())
} else {
logrus.Warnf("plugin do not found share_image, skip it")
}
oldPlugins[index] = plugin
}
@ -387,7 +389,7 @@ func (i *ImportApp) importPlugins() error {
user := plugin.Get("plugin_image.hub_user").String()
pass := plugin.Get("plugin_image.hub_password").String()
// 上传之前先要根据新的仓库地址修改镜像名
image := i.oldPluginPath[plugin.Get("service_key").String()]
image := i.oldPluginPath[plugin.Get("plugin_id").String()]
imageName := sources.ImageNameWithNamespaceHandle(image)
saveImageName := fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, imageName.Name, imageName.Tag)
newImageName := plugin.Get("share_image").String()

View File

@ -114,21 +114,21 @@ func (e *exectorManager) runD(t *model.BuildPluginTaskBody, logger event.Logger)
} else {
buildOptions.NoCache = false
}
logger.Info("开始构建镜像", map[string]string{"step": "builder-exector"})
logger.Info("start build image", map[string]string{"step": "builder-exector"})
err := sources.ImageBuild(e.DockerClient, sourceDir, buildOptions, logger, 5)
if err != nil {
logger.Error(fmt.Sprintf("构造镜像%s失败: %s", buildImageName, err.Error()), map[string]string{"step": "builder-exector", "status": "failure"})
logger.Error(fmt.Sprintf("build image %s failure,find log in rbd-chaos", buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("[plugin]build image error: %s", err.Error())
return err
}
logger.Info("镜像构建成功,开始推送镜像至仓库", map[string]string{"step": "builder-exector"})
logger.Info("build image success, start to push image to local image registry", map[string]string{"step": "builder-exector"})
err = sources.ImagePush(e.DockerClient, buildImageName, builder.REGISTRYUSER, builder.REGISTRYPASS, logger, 2)
if err != nil {
logger.Error("推送镜像失败", map[string]string{"step": "builder-exector"})
logger.Error("push image failure, find log in rbd-chaos", map[string]string{"step": "builder-exector"})
logrus.Errorf("push image error: %s", err.Error())
return err
}
logger.Info("推送镜像完成", map[string]string{"step": "build-exector"})
logger.Info("push image success", map[string]string{"step": "build-exector"})
version, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByDeployVersion(t.PluginID, t.VersionID, t.DeployVersion)
if err != nil {
logrus.Errorf("get version error, %v", err)
@ -140,7 +140,7 @@ func (e *exectorManager) runD(t *model.BuildPluginTaskBody, logger event.Logger)
logrus.Errorf("update version error, %v", err)
return err
}
logger.Info("从dockerfile构建插件完成", map[string]string{"step": "last", "status": "success"})
logger.Info("build plugin version by dockerfile success", map[string]string{"step": "last", "status": "success"})
return nil
}

View File

@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<packaging>pom</packaging>
<modules>
<module>rbd-worker</module>
<module>rbd-chaos</module>
</modules>
</project>

View File

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>rbd-chaos</artifactId>
<name>rbd-chaos</name>
<packaging>foo</packaging>
</project>

View File

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<name>rbd-worker</name>
<artifactId>rbd-worker</artifactId>
<packaging>war</packaging>
</project>

View File

@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<groupId>com.github</groupId>
<artifactId>rainbond</artifactId>
<version>1.0.0</version>
<packaging>pom</packaging>
<modules>
<module>rbd-api</module>
<module>rbd-gateway</module>
<module>rbd-monitor</module>
<module>foobar</module>
</modules>
<build>
<finalName>${project.name}</finalName>
</build>
</project>

View File

@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<name>rbd-api</name>
<artifactId>rbd-api</artifactId>
<packaging>jar</packaging>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<finalName>${project.name}</finalName>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>rbd-gateway</artifactId>
<name>rbd-gateway</name>
<packaging>pom</packaging>
</project>

View File

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>rbd-monitor</artifactId>
<build>
<finalName>${abc}</finalName>
</build>
</project>

View File

@ -43,7 +43,7 @@ import (
"github.com/goodrain/rainbond/util"
netssh "golang.org/x/crypto/ssh"
sshkey "golang.org/x/crypto/ssh"
"gopkg.in/src-d/go-git.v4"
git "gopkg.in/src-d/go-git.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/plumbing/transport/client"
@ -101,6 +101,14 @@ func RemoveDir(path string) error {
}
return os.RemoveAll(path)
}
// getShowURL returns a display-safe form of a repository URL that keeps only
// the scheme, host (including any port) and path, so embedded credentials
// (user:token@) never reach the logs. An unparsable URL yields "".
func getShowURL(rurl string) string {
	parsed, _ := url.Parse(rurl)
	if parsed == nil {
		return ""
	}
	return fmt.Sprintf("%s://%s%s", parsed.Scheme, parsed.Host, parsed.Path)
}
//GitClone git clone code
func GitClone(csi CodeSourceInfo, sourceDir string, logger event.Logger, timeout int) (*git.Repository, error) {
@ -108,7 +116,8 @@ func GitClone(csi CodeSourceInfo, sourceDir string, logger event.Logger, timeout
flag := true
Loop:
if logger != nil {
logger.Info(fmt.Sprintf("Start clone source code from %s", csi.RepositoryURL), map[string]string{"step": "clone_code"})
//Hide possible account key information
logger.Info(fmt.Sprintf("Start clone source code from %s", getShowURL(csi.RepositoryURL)), map[string]string{"step": "clone_code"})
}
ep, err := transport.NewEndpoint(csi.RepositoryURL)
if err != nil {

View File

@ -103,3 +103,7 @@ func TestGetCodeCacheDir(t *testing.T) {
}
t.Log(csi.GetCodeSourceDir())
}
// TestGetShowURL verifies that getShowURL strips embedded credentials
// (user:token@) from a repository URL while preserving scheme, host:port
// and path. The original test only logged the result and asserted nothing,
// so it could never fail; it now pins the expected sanitized URL.
func TestGetShowURL(t *testing.T) {
	got := getShowURL("https://zsl1526:79890ffc74014b34b49040d42b95d5af@github.com:9090/zsl1549/python-demo.git")
	want := "https://github.com:9090/zsl1549/python-demo.git"
	if got != want {
		t.Errorf("getShowURL() = %q, want %q", got, want)
	}
}

View File

@ -31,18 +31,14 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/reference"
"golang.org/x/net/context"
//"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types"
//"github.com/docker/docker/client"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/model"
"github.com/goodrain/rainbond/event"
"golang.org/x/net/context"
)
//ErrorNoAuth error no auth
@ -51,19 +47,18 @@ var ErrorNoAuth = fmt.Errorf("pull image require docker login")
//ErrorNoImage error no image
var ErrorNoImage = fmt.Errorf("image not exist")
//ImagePull 拉取镜像
//timeout 分钟为单位
//ImagePull pull docker image
//timeout minutes of the unit
func ImagePull(dockerCli *client.Client, image string, username, password string, logger event.Logger, timeout int) (*types.ImageInspect, error) {
if logger != nil {
//进度信息
logger.Info(fmt.Sprintf("开始获取镜像:%s", image), map[string]string{"step": "pullimage"})
logger.Info(fmt.Sprintf("start get image:%s", image), map[string]string{"step": "pullimage"})
}
var pullipo types.ImagePullOptions
if username != "" && password != "" {
auth, err := EncodeAuthToBase64(types.AuthConfig{Username: username, Password: password})
if err != nil {
logrus.Errorf("make auth base63 push image error: %s", err.Error())
logger.Error(fmt.Sprintf("生成获取镜像的Token失败"), map[string]string{"step": "builder-exector", "status": "failure"})
logger.Error(fmt.Sprintf("Failed to generate a Token to get the image"), map[string]string{"step": "builder-exector", "status": "failure"})
return nil, err
}
pullipo = types.ImagePullOptions{
@ -90,7 +85,7 @@ func ImagePull(dockerCli *client.Client, image string, username, password string
logrus.Debugf("image name: %s readcloser error: %v", image, err.Error())
if strings.HasSuffix(err.Error(), "does not exist or no pull access") {
if logger != nil {
logger.Error(fmt.Sprintf("镜像:%s不存在或无权获取", image), map[string]string{"step": "pullimage", "status": "failure"})
logger.Error(fmt.Sprintf("image: %s does not exist or is not available", image), map[string]string{"step": "pullimage", "status": "failure"})
}
return nil, fmt.Errorf("Image(%s) does not exist or no pull access", image)
}
@ -131,11 +126,10 @@ func ImagePull(dockerCli *client.Client, image string, username, password string
return &ins, nil
}
//ImageTag 修改镜像tag
//ImageTag change docker image tag
func ImageTag(dockerCli *client.Client, source, target string, logger event.Logger, timeout int) error {
if logger != nil {
//进度信息
logger.Info(fmt.Sprintf("开始修改镜像tag%s -> %s", source, target), map[string]string{"step": "changetag"})
logger.Info(fmt.Sprintf("change image tag%s -> %s", source, target), map[string]string{"step": "changetag"})
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*time.Duration(timeout))
defer cancel()
@ -144,7 +138,7 @@ func ImageTag(dockerCli *client.Client, source, target string, logger event.Logg
logrus.Debugf("image tag err: %s", err.Error())
return err
}
logger.Info("镜像tag修改完成", map[string]string{"step": "changetag"})
logger.Info("change image tag success", map[string]string{"step": "changetag"})
return nil
}
@ -215,14 +209,12 @@ func GenSaveImageName(name string) string {
return fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, imageName.Name, imageName.Tag)
}
//ImagePush 推送镜像
//timeout 分钟为单位
//ImagePush push image to registry
//timeout minutes of the unit
func ImagePush(dockerCli *client.Client, image, user, pass string, logger event.Logger, timeout int) error {
if logger != nil {
//进度信息
logger.Info(fmt.Sprintf("开始推送镜像:%s", image), map[string]string{"step": "pushimage"})
logger.Info(fmt.Sprintf("start push image%s", image), map[string]string{"step": "pushimage"})
}
//最少一分钟
if timeout < 1 {
timeout = 1
}
@ -245,7 +237,7 @@ func ImagePush(dockerCli *client.Client, image, user, pass string, logger event.
if err != nil {
logrus.Errorf("make auth base63 push image error: %s", err.Error())
if logger != nil {
logger.Error(fmt.Sprintf("生成获取镜像的Token失败"), map[string]string{"step": "builder-exector", "status": "failure"})
logger.Error(fmt.Sprintf("Failed to generate a token to get the image"), map[string]string{"step": "builder-exector", "status": "failure"})
}
return err
}
@ -256,13 +248,13 @@ func ImagePush(dockerCli *client.Client, image, user, pass string, logger event.
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
if logger != nil {
logger.Error(fmt.Sprintf("镜像:%s不存在不能推送", image), map[string]string{"step": "pushimage", "status": "failure"})
logger.Error(fmt.Sprintf("image %s does not exist, cannot be pushed", image), map[string]string{"step": "pushimage", "status": "failure"})
}
return fmt.Errorf("Image(%s) does not exist", image)
}
return err
}
logger.Info(fmt.Sprintf("成功推送镜像%s", image), map[string]string{"step": "pushimage"})
logger.Info(fmt.Sprintf("success push image%s", image), map[string]string{"step": "pushimage"})
if readcloser != nil {
defer readcloser.Close()
dec := json.NewDecoder(readcloser)

View File

@ -70,7 +70,7 @@ func (a *Builder) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&a.MysqlConnectionInfo, "mysql", "root:admin@tcp(127.0.0.1:3306)/region", "mysql db connection info")
fs.StringSliceVar(&a.EventLogServers, "event-servers", []string{"127.0.0.1:6366"}, "event log server address. simple lb")
fs.StringVar(&a.KubeConfig, "kube-config", "/opt/rainbond/etc/kubernetes/kubecfg/admin.kubeconfig", "kubernetes api server config file")
fs.IntVar(&a.MaxTasks, "max-tasks", 50, "the max tasks for per node")
fs.IntVar(&a.MaxTasks, "max-tasks", 0, "Maximum number of simultaneous build tasksIf set to 0, the maximum limit is twice the number of CPU cores")
fs.IntVar(&a.APIPort, "api-port", 3228, "the port for api server")
fs.StringVar(&a.MQAPI, "mq-api", "127.0.0.1:6300", "acp_mq api")
fs.StringVar(&a.RunMode, "run", "sync", "sync data when worker start")

View File

@ -147,12 +147,6 @@ func (s *LogServer) InitLog() {
}
}
log.Formatter = &logrus.TextFormatter{}
// hook, err := logrus_mail.NewMailHook("EventLog", "HOST", 25, "FROM", "TO")
// if err != nil {
// log.Error("Create mail hook for log error.", err.Error())
// } else {
// log.Hooks.Add(hook)
// }
s.Logger = log
}

View File

@ -22,7 +22,11 @@ import (
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/goodrain/rainbond/monitor/custom"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/cmd/monitor/option"
@ -46,6 +50,9 @@ func main() {
p := prometheus.NewManager(c, a)
controllerManager := controller.NewControllerManager(a, p)
monitorMysql(c, p)
monitorKSM(c, p)
errChan := make(chan error, 1)
defer close(errChan)
p.StartDaemon(errChan)
@ -79,3 +86,19 @@ func main() {
}
logrus.Info("See you next time!")
}
// monitorMysql registers a mysqld-exporter scrape target with the prometheus
// manager. A blank (or whitespace-only) exporter address disables it.
func monitorMysql(c *option.Config, p *prometheus.Manager) {
	metrics := strings.TrimSpace(c.MysqldExporter)
	if metrics == "" {
		return
	}
	logrus.Infof("add mysql metrics[%s] into prometheus", metrics)
	custom.AddMetrics(p, custom.Metrics{Name: "mysql", Path: "/metrics", Metrics: []string{metrics}, Interval: 30 * time.Second, Timeout: 15 * time.Second})
}
// monitorKSM registers a kube-state-metrics scrape target with the prometheus
// manager. A blank (or whitespace-only) exporter address disables it.
func monitorKSM(c *option.Config, p *prometheus.Manager) {
	metrics := strings.TrimSpace(c.KSMExporter)
	if metrics == "" {
		return
	}
	logrus.Infof("add kube-state-metrics[%s] into prometheus", metrics)
	custom.AddMetrics(p, custom.Metrics{Name: "kubernetes", Path: "/metrics", Metrics: []string{metrics}, Interval: 30 * time.Second, Timeout: 10 * time.Second})
}

View File

@ -52,6 +52,8 @@ type Config struct {
QueryTimeout string
QueryMaxConcurrency string
CadvisorListenPort int
MysqldExporter string
KSMExporter string
}
// Options for the web Handler.
@ -133,6 +135,8 @@ func (c *Config) AddFlag(cmd *pflag.FlagSet) {
cmd.StringVar(&c.AdvertiseAddr, "advertise-addr", c.AdvertiseAddr, "advertise address, and registry into etcd.")
cmd.IntVar(&c.CadvisorListenPort, "cadvisor-listen-port", c.CadvisorListenPort, "kubelet cadvisor listen port in all node")
cmd.StringSliceVar(&c.AlertManagerUrl, "alertmanager-address", c.AlertManagerUrl, "AlertManager url.")
cmd.StringVar(&c.MysqldExporter, "mysqld-exporter", c.MysqldExporter, "mysqld exporter address. eg: 127.0.0.1:9104")
cmd.StringVar(&c.KSMExporter, "kube-state-metrics", c.KSMExporter, "kube-state-metrics, current server's kube-state-metrics address")
}
//AddPrometheusFlag prometheus flag

View File

@ -32,7 +32,10 @@ func main() {
server.ParseClientCommnad(os.Args)
option.Config.AddFlags(pflag.CommandLine)
server.InstallServiceFlags(pflag.CommandLine)
option.Init()
if err := option.Init(); err != nil {
fmt.Fprintf(os.Stderr, "init config error: %v\n", err)
os.Exit(1)
}
if err := server.Run(option.Config); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)

View File

@ -45,7 +45,7 @@ var (
exitChan = make(chan struct{})
)
//Init 初始化
//Init init config
func Init() error {
if initialized {
return nil
@ -150,7 +150,7 @@ func (a *Conf) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&a.Etcd.DialTimeout, "etcd-dialTimeOut", 3, "etcd cluster dialTimeOut In seconds")
fs.IntVar(&a.ReqTimeout, "reqTimeOut", 2, "req TimeOut.")
fs.Int64Var(&a.TTL, "ttl", 10, "Frequency of node status reporting to master")
fs.StringVar(&a.APIAddr, "api-addr", ":6100", "The node api server listen address")
//fs.StringVar(&a.APIAddr, "api-addr", ":6100", "The node api server listen address")
fs.StringVar(&a.GrpcAPIAddr, "grpc-api-addr", ":6101", "The node grpc api server listen address")
fs.StringVar(&a.K8SConfPath, "kube-conf", "/opt/rainbond/etc/kubernetes/kubecfg/admin.kubeconfig", "absolute path to the kubeconfig file ./kubeconfig")
fs.StringVar(&a.RunMode, "run-mode", "worker", "the acp_node run mode,could be 'worker' or 'master'")
@ -228,11 +228,21 @@ func (a *Conf) parse() error {
} else {
a.Etcd.DialTimeout = a.Etcd.DialTimeout * time.Second
}
a.Etcd.Context = context.Background()
if a.TTL <= 0 {
a.TTL = 10
}
a.LockPath = "/rainbond/lock"
if a.HostIP == "" || !util.CheckIP(a.HostIP) {
localIP, err := util.LocalIP()
if localIP == nil || err != nil {
return fmt.Errorf("can not find ip of this node")
}
a.HostIP = localIP.String()
}
//init api listen port, can not custom
if a.APIAddr == "" {
a.APIAddr = ":6100"
}
return nil
}

View File

@ -25,6 +25,10 @@ import (
"github.com/Sirupsen/logrus"
"github.com/eapache/channels"
kubeaggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/goodrain/rainbond/cmd/worker/option"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/config"
@ -37,8 +41,6 @@ import (
"github.com/goodrain/rainbond/worker/master"
"github.com/goodrain/rainbond/worker/monitor"
"github.com/goodrain/rainbond/worker/server"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
//Run start run
@ -76,7 +78,12 @@ func Run(s *option.Worker) error {
return err
}
s.Config.KubeClient = clientset
//etcdCli, err := client.New(client.Config{})
kubeaggregatorclientset, err := kubeaggregatorclientset.NewForConfig(c)
if err != nil {
logrus.Error("kube aggregator; read kube config file error.", err)
return err
}
//step 3: create resource store
startCh := channels.NewRingChannel(1024)
@ -98,7 +105,7 @@ func Run(s *option.Worker) error {
defer controllerManager.Stop()
//step 5 : start runtime master
masterCon, err := master.NewMasterController(s.Config, cachestore)
masterCon, err := master.NewMasterController(s.Config, cachestore, kubeaggregatorclientset)
if err != nil {
return err
}

View File

@ -52,6 +52,7 @@ type TenantDao interface {
GetTenantIDsByNames(names []string) ([]string, error)
GetTenantLimitsByNames(names []string) (map[string]int, error)
GetTenantByUUIDIsExist(uuid string) bool
DelByTenantID(tenantID string) error
}
//AppDao tenant dao
@ -87,6 +88,7 @@ type TenantServiceDao interface {
GetAllServicesID() ([]*model.TenantServices, error)
UpdateDeployVersion(serviceID, deployversion string) error
ListThirdPartyServices() ([]*model.TenantServices, error)
ListServicesByTenantID(tenantID string) ([]*model.TenantServices, error)
}
//TenantServiceDeleteDao TenantServiceDeleteDao
@ -119,6 +121,7 @@ type TenantPluginDao interface {
DeletePluginByID(pluginID, tenantID string) error
GetPluginsByTenantID(tenantID string) ([]*model.TenantPlugin, error)
ListByIDs(ids []string) ([]*model.TenantPlugin, error)
ListByTenantID(tenantID string) ([]*model.TenantPlugin, error)
}
//TenantPluginDefaultENVDao TenantPluginDefaultENVDao
@ -451,3 +454,27 @@ type GwRuleConfigDao interface {
DeleteByRuleID(rid string) error
ListByRuleID(rid string) ([]*model.GwRuleConfig, error)
}
// TenantServceAutoscalerRulesDao persists autoscaler rules bound to a
// service and supports lookup by rule ID or by owning service.
// NOTE(review): "Servce" is a typo in the exported name; kept as-is because
// renaming would break every implementer and caller.
type TenantServceAutoscalerRulesDao interface {
	Dao
	GetByRuleID(ruleID string) (*model.TenantServiceAutoscalerRules, error)
	ListByServiceID(serviceID string) ([]*model.TenantServiceAutoscalerRules, error)
	// ListEnableOnesByServiceID returns only the enabled rules for a service.
	ListEnableOnesByServiceID(serviceID string) ([]*model.TenantServiceAutoscalerRules, error)
}
// TenantServceAutoscalerRuleMetricsDao manages the metrics attached to an
// autoscaler rule (table tenant_services_autoscaler_rule_metrics).
// NOTE(review): "Servce" is a typo for "Service", kept for API compatibility.
type TenantServceAutoscalerRuleMetricsDao interface {
	Dao
	// UpdateOrCreate inserts the metric, or updates the stored target when a
	// row with the same rule/type/name already exists.
	UpdateOrCreate(metric *model.TenantServiceAutoscalerRuleMetrics) error
	// ListByRuleID returns all metrics configured for the rule.
	ListByRuleID(ruleID string) ([]*model.TenantServiceAutoscalerRuleMetrics, error)
	// DeleteByRuleID removes every metric bound to the rule.
	// (parameter renamed from the original typo "ruldID"; interface parameter
	// names are documentation only, so implementations are unaffected)
	DeleteByRuleID(ruleID string) error
}
// TenantServiceScalingRecordsDao manages historical scaling events
// (table tenant_services_scaling_records).
type TenantServiceScalingRecordsDao interface {
	Dao
	// UpdateOrCreate upserts a scaling record.
	// (parameter renamed from "new", which shadows the builtin; interface
	// parameter names are documentation only, so implementations are unaffected)
	UpdateOrCreate(record *model.TenantServiceScalingRecords) error
	// ListByServiceID returns one page of records for the service.
	ListByServiceID(serviceID string, offset, limit int) ([]*model.TenantServiceScalingRecords, error)
	// CountByServiceID returns the total number of records for the service.
	CountByServiceID(serviceID string) (int, error)
}

View File

@ -274,6 +274,20 @@ func (mr *MockTenantDaoMockRecorder) GetTenantByUUIDIsExist(uuid interface{}) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTenantByUUIDIsExist", reflect.TypeOf((*MockTenantDao)(nil).GetTenantByUUIDIsExist), uuid)
}
// DelByTenantID mocks base method
// NOTE(review): mockgen-style boilerplate; prefer regenerating over hand edits.
func (m *MockTenantDao) DelByTenantID(tenantID string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DelByTenantID", tenantID)
	ret0, _ := ret[0].(error)
	return ret0
}

// DelByTenantID indicates an expected call of DelByTenantID
func (mr *MockTenantDaoMockRecorder) DelByTenantID(tenantID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DelByTenantID", reflect.TypeOf((*MockTenantDao)(nil).DelByTenantID), tenantID)
}
// MockAppDao is a mock of AppDao interface
type MockAppDao struct {
ctrl *gomock.Controller
@ -709,6 +723,21 @@ func (mr *MockTenantServiceDaoMockRecorder) ListThirdPartyServices() *gomock.Cal
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListThirdPartyServices", reflect.TypeOf((*MockTenantServiceDao)(nil).ListThirdPartyServices))
}
// ListServicesByTenantID mocks base method
// NOTE(review): mockgen-style boilerplate; prefer regenerating over hand edits.
func (m *MockTenantServiceDao) ListServicesByTenantID(tenantID string) ([]*model.TenantServices, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListServicesByTenantID", tenantID)
	ret0, _ := ret[0].([]*model.TenantServices)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListServicesByTenantID indicates an expected call of ListServicesByTenantID
func (mr *MockTenantServiceDaoMockRecorder) ListServicesByTenantID(tenantID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServicesByTenantID", reflect.TypeOf((*MockTenantServiceDao)(nil).ListServicesByTenantID), tenantID)
}
// MockTenantServiceDeleteDao is a mock of TenantServiceDeleteDao interface
type MockTenantServiceDeleteDao struct {
ctrl *gomock.Controller
@ -1101,6 +1130,21 @@ func (mr *MockTenantPluginDaoMockRecorder) ListByIDs(ids interface{}) *gomock.Ca
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByIDs", reflect.TypeOf((*MockTenantPluginDao)(nil).ListByIDs), ids)
}
// ListByTenantID mocks base method
// NOTE(review): mockgen-style boilerplate; prefer regenerating over hand edits.
func (m *MockTenantPluginDao) ListByTenantID(tenantID string) ([]*model.TenantPlugin, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListByTenantID", tenantID)
	ret0, _ := ret[0].([]*model.TenantPlugin)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListByTenantID indicates an expected call of ListByTenantID
func (mr *MockTenantPluginDaoMockRecorder) ListByTenantID(tenantID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByTenantID", reflect.TypeOf((*MockTenantPluginDao)(nil).ListByTenantID), tenantID)
}
// MockTenantPluginDefaultENVDao is a mock of TenantPluginDefaultENVDao interface
type MockTenantPluginDefaultENVDao struct {
ctrl *gomock.Controller
@ -5006,3 +5050,246 @@ func (mr *MockGwRuleConfigDaoMockRecorder) ListByRuleID(rid interface{}) *gomock
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByRuleID", reflect.TypeOf((*MockGwRuleConfigDao)(nil).ListByRuleID), rid)
}
// MockTenantServceAutoscalerRulesDao is a mock of TenantServceAutoscalerRulesDao interface
// NOTE(review): mockgen-style generated mock; regenerate rather than hand-edit.
type MockTenantServceAutoscalerRulesDao struct {
	ctrl     *gomock.Controller
	recorder *MockTenantServceAutoscalerRulesDaoMockRecorder
}

// MockTenantServceAutoscalerRulesDaoMockRecorder is the mock recorder for MockTenantServceAutoscalerRulesDao
type MockTenantServceAutoscalerRulesDaoMockRecorder struct {
	mock *MockTenantServceAutoscalerRulesDao
}

// NewMockTenantServceAutoscalerRulesDao creates a new mock instance
func NewMockTenantServceAutoscalerRulesDao(ctrl *gomock.Controller) *MockTenantServceAutoscalerRulesDao {
	mock := &MockTenantServceAutoscalerRulesDao{ctrl: ctrl}
	mock.recorder = &MockTenantServceAutoscalerRulesDaoMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockTenantServceAutoscalerRulesDao) EXPECT() *MockTenantServceAutoscalerRulesDaoMockRecorder {
	return m.recorder
}

// AddModel mocks base method
func (m *MockTenantServceAutoscalerRulesDao) AddModel(arg0 model.Interface) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AddModel", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// AddModel indicates an expected call of AddModel
func (mr *MockTenantServceAutoscalerRulesDaoMockRecorder) AddModel(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddModel", reflect.TypeOf((*MockTenantServceAutoscalerRulesDao)(nil).AddModel), arg0)
}

// UpdateModel mocks base method
func (m *MockTenantServceAutoscalerRulesDao) UpdateModel(arg0 model.Interface) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UpdateModel", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// UpdateModel indicates an expected call of UpdateModel
func (mr *MockTenantServceAutoscalerRulesDaoMockRecorder) UpdateModel(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateModel", reflect.TypeOf((*MockTenantServceAutoscalerRulesDao)(nil).UpdateModel), arg0)
}

// GetByRuleID mocks base method
func (m *MockTenantServceAutoscalerRulesDao) GetByRuleID(ruleID string) (*model.TenantServiceAutoscalerRules, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetByRuleID", ruleID)
	ret0, _ := ret[0].(*model.TenantServiceAutoscalerRules)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetByRuleID indicates an expected call of GetByRuleID
func (mr *MockTenantServceAutoscalerRulesDaoMockRecorder) GetByRuleID(ruleID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByRuleID", reflect.TypeOf((*MockTenantServceAutoscalerRulesDao)(nil).GetByRuleID), ruleID)
}

// ListByServiceID mocks base method
func (m *MockTenantServceAutoscalerRulesDao) ListByServiceID(serviceID string) ([]*model.TenantServiceAutoscalerRules, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListByServiceID", serviceID)
	ret0, _ := ret[0].([]*model.TenantServiceAutoscalerRules)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListByServiceID indicates an expected call of ListByServiceID
func (mr *MockTenantServceAutoscalerRulesDaoMockRecorder) ListByServiceID(serviceID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServiceID", reflect.TypeOf((*MockTenantServceAutoscalerRulesDao)(nil).ListByServiceID), serviceID)
}

// ListEnableOnesByServiceID mocks base method
func (m *MockTenantServceAutoscalerRulesDao) ListEnableOnesByServiceID(serviceID string) ([]*model.TenantServiceAutoscalerRules, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListEnableOnesByServiceID", serviceID)
	ret0, _ := ret[0].([]*model.TenantServiceAutoscalerRules)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListEnableOnesByServiceID indicates an expected call of ListEnableOnesByServiceID
func (mr *MockTenantServceAutoscalerRulesDaoMockRecorder) ListEnableOnesByServiceID(serviceID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEnableOnesByServiceID", reflect.TypeOf((*MockTenantServceAutoscalerRulesDao)(nil).ListEnableOnesByServiceID), serviceID)
}
// MockTenantServceAutoscalerRuleMetricsDao is a mock of TenantServceAutoscalerRuleMetricsDao interface
// NOTE(review): mockgen-style generated mock; regenerate rather than hand-edit.
type MockTenantServceAutoscalerRuleMetricsDao struct {
	ctrl     *gomock.Controller
	recorder *MockTenantServceAutoscalerRuleMetricsDaoMockRecorder
}

// MockTenantServceAutoscalerRuleMetricsDaoMockRecorder is the mock recorder for MockTenantServceAutoscalerRuleMetricsDao
type MockTenantServceAutoscalerRuleMetricsDaoMockRecorder struct {
	mock *MockTenantServceAutoscalerRuleMetricsDao
}

// NewMockTenantServceAutoscalerRuleMetricsDao creates a new mock instance
func NewMockTenantServceAutoscalerRuleMetricsDao(ctrl *gomock.Controller) *MockTenantServceAutoscalerRuleMetricsDao {
	mock := &MockTenantServceAutoscalerRuleMetricsDao{ctrl: ctrl}
	mock.recorder = &MockTenantServceAutoscalerRuleMetricsDaoMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockTenantServceAutoscalerRuleMetricsDao) EXPECT() *MockTenantServceAutoscalerRuleMetricsDaoMockRecorder {
	return m.recorder
}

// AddModel mocks base method
func (m *MockTenantServceAutoscalerRuleMetricsDao) AddModel(arg0 model.Interface) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AddModel", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// AddModel indicates an expected call of AddModel
func (mr *MockTenantServceAutoscalerRuleMetricsDaoMockRecorder) AddModel(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddModel", reflect.TypeOf((*MockTenantServceAutoscalerRuleMetricsDao)(nil).AddModel), arg0)
}

// UpdateModel mocks base method
func (m *MockTenantServceAutoscalerRuleMetricsDao) UpdateModel(arg0 model.Interface) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UpdateModel", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// UpdateModel indicates an expected call of UpdateModel
func (mr *MockTenantServceAutoscalerRuleMetricsDaoMockRecorder) UpdateModel(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateModel", reflect.TypeOf((*MockTenantServceAutoscalerRuleMetricsDao)(nil).UpdateModel), arg0)
}

// UpdateOrCreate mocks base method
func (m *MockTenantServceAutoscalerRuleMetricsDao) UpdateOrCreate(metric *model.TenantServiceAutoscalerRuleMetrics) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UpdateOrCreate", metric)
	ret0, _ := ret[0].(error)
	return ret0
}

// UpdateOrCreate indicates an expected call of UpdateOrCreate
func (mr *MockTenantServceAutoscalerRuleMetricsDaoMockRecorder) UpdateOrCreate(metric interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOrCreate", reflect.TypeOf((*MockTenantServceAutoscalerRuleMetricsDao)(nil).UpdateOrCreate), metric)
}

// ListByRuleID mocks base method
func (m *MockTenantServceAutoscalerRuleMetricsDao) ListByRuleID(ruleID string) ([]*model.TenantServiceAutoscalerRuleMetrics, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListByRuleID", ruleID)
	ret0, _ := ret[0].([]*model.TenantServiceAutoscalerRuleMetrics)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListByRuleID indicates an expected call of ListByRuleID
func (mr *MockTenantServceAutoscalerRuleMetricsDaoMockRecorder) ListByRuleID(ruleID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByRuleID", reflect.TypeOf((*MockTenantServceAutoscalerRuleMetricsDao)(nil).ListByRuleID), ruleID)
}
// MockTenantServiceScalingRecordsDao is a mock of TenantServiceScalingRecordsDao interface
// NOTE(review): mockgen-style generated mock; regenerate rather than hand-edit.
type MockTenantServiceScalingRecordsDao struct {
	ctrl     *gomock.Controller
	recorder *MockTenantServiceScalingRecordsDaoMockRecorder
}

// MockTenantServiceScalingRecordsDaoMockRecorder is the mock recorder for MockTenantServiceScalingRecordsDao
type MockTenantServiceScalingRecordsDaoMockRecorder struct {
	mock *MockTenantServiceScalingRecordsDao
}

// NewMockTenantServiceScalingRecordsDao creates a new mock instance
func NewMockTenantServiceScalingRecordsDao(ctrl *gomock.Controller) *MockTenantServiceScalingRecordsDao {
	mock := &MockTenantServiceScalingRecordsDao{ctrl: ctrl}
	mock.recorder = &MockTenantServiceScalingRecordsDaoMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockTenantServiceScalingRecordsDao) EXPECT() *MockTenantServiceScalingRecordsDaoMockRecorder {
	return m.recorder
}

// UpdateOrCreate mocks base method
func (m *MockTenantServiceScalingRecordsDao) UpdateOrCreate(new *model.TenantServiceScalingRecords) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UpdateOrCreate", new)
	ret0, _ := ret[0].(error)
	return ret0
}

// UpdateOrCreate indicates an expected call of UpdateOrCreate
func (mr *MockTenantServiceScalingRecordsDaoMockRecorder) UpdateOrCreate(new interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOrCreate", reflect.TypeOf((*MockTenantServiceScalingRecordsDao)(nil).UpdateOrCreate), new)
}

// ListByServiceID mocks base method
func (m *MockTenantServiceScalingRecordsDao) ListByServiceID(serviceID string, offset, limit int) ([]*model.TenantServiceScalingRecords, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListByServiceID", serviceID, offset, limit)
	ret0, _ := ret[0].([]*model.TenantServiceScalingRecords)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// ListByServiceID indicates an expected call of ListByServiceID
func (mr *MockTenantServiceScalingRecordsDaoMockRecorder) ListByServiceID(serviceID, offset, limit interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByServiceID", reflect.TypeOf((*MockTenantServiceScalingRecordsDao)(nil).ListByServiceID), serviceID, offset, limit)
}

// CountByServiceID mocks base method
func (m *MockTenantServiceScalingRecordsDao) CountByServiceID(serviceID string) (int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CountByServiceID", serviceID)
	ret0, _ := ret[0].(int)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// CountByServiceID indicates an expected call of CountByServiceID
func (mr *MockTenantServiceScalingRecordsDaoMockRecorder) CountByServiceID(serviceID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountByServiceID", reflect.TypeOf((*MockTenantServiceScalingRecordsDao)(nil).CountByServiceID), serviceID)
}

View File

@ -32,6 +32,7 @@ import (
type Manager interface {
CloseManager() error
Begin() *gorm.DB
EnsureEndTransactionFunc() func(tx *gorm.DB)
LicenseDao() dao.LicenseDao
AppDao() dao.AppDao
TenantDao() dao.TenantDao
@ -94,6 +95,7 @@ type Manager interface {
NotificationEventDao() dao.NotificationEventDao
AppBackupDao() dao.AppBackupDao
AppBackupDaoTransactions(db *gorm.DB) dao.AppBackupDao
ServiceSourceDao() dao.ServiceSourceDao
// gateway
@ -113,6 +115,13 @@ type Manager interface {
EndpointsDaoTransactions(db *gorm.DB) dao.EndpointsDao
ThirdPartySvcDiscoveryCfgDao() dao.ThirdPartySvcDiscoveryCfgDao
ThirdPartySvcDiscoveryCfgDaoTransactions(db *gorm.DB) dao.ThirdPartySvcDiscoveryCfgDao
TenantServceAutoscalerRulesDao() dao.TenantServceAutoscalerRulesDao
TenantServceAutoscalerRulesDaoTransactions(db *gorm.DB) dao.TenantServceAutoscalerRulesDao
TenantServceAutoscalerRuleMetricsDao() dao.TenantServceAutoscalerRuleMetricsDao
TenantServceAutoscalerRuleMetricsDaoTransactions(db *gorm.DB) dao.TenantServceAutoscalerRuleMetricsDao
TenantServiceScalingRecordsDao() dao.TenantServiceScalingRecordsDao
TenantServiceScalingRecordsDaoTransactions(db *gorm.DB) dao.TenantServiceScalingRecordsDao
}
var defaultManager Manager

View File

@ -62,6 +62,20 @@ func (mr *MockManagerMockRecorder) Begin() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Begin", reflect.TypeOf((*MockManager)(nil).Begin))
}
// EnsureEndTransactionFunc mocks base method
// NOTE(review): mockgen-style boilerplate; prefer regenerating over hand edits.
func (m *MockManager) EnsureEndTransactionFunc() func(*gorm.DB) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "EnsureEndTransactionFunc")
	ret0, _ := ret[0].(func(*gorm.DB))
	return ret0
}

// EnsureEndTransactionFunc indicates an expected call of EnsureEndTransactionFunc
func (mr *MockManagerMockRecorder) EnsureEndTransactionFunc() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureEndTransactionFunc", reflect.TypeOf((*MockManager)(nil).EnsureEndTransactionFunc))
}
// LicenseDao mocks base method
func (m *MockManager) LicenseDao() dao.LicenseDao {
m.ctrl.T.Helper()
@ -832,6 +846,20 @@ func (mr *MockManagerMockRecorder) AppBackupDao() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppBackupDao", reflect.TypeOf((*MockManager)(nil).AppBackupDao))
}
// AppBackupDaoTransactions mocks base method
// NOTE(review): mockgen-style boilerplate; prefer regenerating over hand edits.
func (m *MockManager) AppBackupDaoTransactions(db *gorm.DB) dao.AppBackupDao {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AppBackupDaoTransactions", db)
	ret0, _ := ret[0].(dao.AppBackupDao)
	return ret0
}

// AppBackupDaoTransactions indicates an expected call of AppBackupDaoTransactions
func (mr *MockManagerMockRecorder) AppBackupDaoTransactions(db interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppBackupDaoTransactions", reflect.TypeOf((*MockManager)(nil).AppBackupDaoTransactions), db)
}
// ServiceSourceDao mocks base method
func (m *MockManager) ServiceSourceDao() dao.ServiceSourceDao {
m.ctrl.T.Helper()
@ -1041,3 +1069,87 @@ func (mr *MockManagerMockRecorder) ThirdPartySvcDiscoveryCfgDaoTransactions(db i
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ThirdPartySvcDiscoveryCfgDaoTransactions", reflect.TypeOf((*MockManager)(nil).ThirdPartySvcDiscoveryCfgDaoTransactions), db)
}
// TenantServceAutoscalerRulesDao mocks base method
// NOTE(review): mockgen-style boilerplate; prefer regenerating over hand edits.
func (m *MockManager) TenantServceAutoscalerRulesDao() dao.TenantServceAutoscalerRulesDao {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TenantServceAutoscalerRulesDao")
	ret0, _ := ret[0].(dao.TenantServceAutoscalerRulesDao)
	return ret0
}

// TenantServceAutoscalerRulesDao indicates an expected call of TenantServceAutoscalerRulesDao
func (mr *MockManagerMockRecorder) TenantServceAutoscalerRulesDao() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantServceAutoscalerRulesDao", reflect.TypeOf((*MockManager)(nil).TenantServceAutoscalerRulesDao))
}

// TenantServceAutoscalerRulesDaoTransactions mocks base method
func (m *MockManager) TenantServceAutoscalerRulesDaoTransactions(db *gorm.DB) dao.TenantServceAutoscalerRulesDao {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TenantServceAutoscalerRulesDaoTransactions", db)
	ret0, _ := ret[0].(dao.TenantServceAutoscalerRulesDao)
	return ret0
}

// TenantServceAutoscalerRulesDaoTransactions indicates an expected call of TenantServceAutoscalerRulesDaoTransactions
func (mr *MockManagerMockRecorder) TenantServceAutoscalerRulesDaoTransactions(db interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantServceAutoscalerRulesDaoTransactions", reflect.TypeOf((*MockManager)(nil).TenantServceAutoscalerRulesDaoTransactions), db)
}

// TenantServceAutoscalerRuleMetricsDao mocks base method
func (m *MockManager) TenantServceAutoscalerRuleMetricsDao() dao.TenantServceAutoscalerRuleMetricsDao {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TenantServceAutoscalerRuleMetricsDao")
	ret0, _ := ret[0].(dao.TenantServceAutoscalerRuleMetricsDao)
	return ret0
}

// TenantServceAutoscalerRuleMetricsDao indicates an expected call of TenantServceAutoscalerRuleMetricsDao
func (mr *MockManagerMockRecorder) TenantServceAutoscalerRuleMetricsDao() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantServceAutoscalerRuleMetricsDao", reflect.TypeOf((*MockManager)(nil).TenantServceAutoscalerRuleMetricsDao))
}

// TenantServceAutoscalerRuleMetricsDaoTransactions mocks base method
func (m *MockManager) TenantServceAutoscalerRuleMetricsDaoTransactions(db *gorm.DB) dao.TenantServceAutoscalerRuleMetricsDao {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TenantServceAutoscalerRuleMetricsDaoTransactions", db)
	ret0, _ := ret[0].(dao.TenantServceAutoscalerRuleMetricsDao)
	return ret0
}

// TenantServceAutoscalerRuleMetricsDaoTransactions indicates an expected call of TenantServceAutoscalerRuleMetricsDaoTransactions
func (mr *MockManagerMockRecorder) TenantServceAutoscalerRuleMetricsDaoTransactions(db interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantServceAutoscalerRuleMetricsDaoTransactions", reflect.TypeOf((*MockManager)(nil).TenantServceAutoscalerRuleMetricsDaoTransactions), db)
}

// TenantServiceScalingRecordsDao mocks base method
func (m *MockManager) TenantServiceScalingRecordsDao() dao.TenantServiceScalingRecordsDao {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TenantServiceScalingRecordsDao")
	ret0, _ := ret[0].(dao.TenantServiceScalingRecordsDao)
	return ret0
}

// TenantServiceScalingRecordsDao indicates an expected call of TenantServiceScalingRecordsDao
func (mr *MockManagerMockRecorder) TenantServiceScalingRecordsDao() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantServiceScalingRecordsDao", reflect.TypeOf((*MockManager)(nil).TenantServiceScalingRecordsDao))
}

// TenantServiceScalingRecordsDaoTransactions mocks base method
func (m *MockManager) TenantServiceScalingRecordsDaoTransactions(db *gorm.DB) dao.TenantServiceScalingRecordsDao {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TenantServiceScalingRecordsDaoTransactions", db)
	ret0, _ := ret[0].(dao.TenantServiceScalingRecordsDao)
	return ret0
}

// TenantServiceScalingRecordsDaoTransactions indicates an expected call of TenantServiceScalingRecordsDaoTransactions
func (mr *MockManagerMockRecorder) TenantServiceScalingRecordsDaoTransactions(db interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TenantServiceScalingRecordsDaoTransactions", reflect.TypeOf((*MockManager)(nil).TenantServiceScalingRecordsDaoTransactions), db)
}

View File

@ -41,6 +41,22 @@ type Interface interface {
TableName() string
}
// TenantStatus represents the lifecycle state of a tenant.
type TenantStatus string

// Declared as constants (the original used `var`): the values are fixed
// status strings and must never be reassigned at runtime.
const (
	// TenantStatusNormal is the default, healthy state.
	TenantStatusNormal TenantStatus = "normal"
	// TenantStatusDeleting marks a tenant whose deletion is in progress.
	TenantStatusDeleting TenantStatus = "deleting"
	// TenantStatusDeleteFailed marks a tenant whose deletion failed.
	TenantStatusDeleteFailed TenantStatus = "delete_failed"
)

// String returns the raw string value of the status.
func (t TenantStatus) String() string {
	return string(t)
}
//Tenants 租户信息
type Tenants struct {
Model
@ -48,6 +64,7 @@ type Tenants struct {
UUID string `gorm:"column:uuid;size:33;unique_index"`
EID string `gorm:"column:eid"`
LimitMemory int `gorm:"column:limit_memory"`
Status string `gorm:"column:status;default:'normal'"`
}
//TableName 返回租户表名称
@ -437,3 +454,53 @@ const (
func (t *TenantServiceProbe) TableName() string {
return "tenant_services_probe"
}
// TenantServiceAutoscalerRules is one autoscaling rule configured for a service.
type TenantServiceAutoscalerRules struct {
	Model
	RuleID    string `gorm:"column:rule_id;unique;size:32"`
	ServiceID string `gorm:"column:service_id;size:32"`
	Enable    bool   `gorm:"column:enable"`
	XPAType   string `gorm:"column:xpa_type;size:3"`
	// Fix: the original tags misspelled "column" as "colume", so gorm ignored
	// them and fell back to the default snake_case names. The corrected tags
	// map to the same columns (min_replicas / max_replicas), so no schema
	// migration is required.
	MinReplicas int `gorm:"column:min_replicas"`
	MaxReplicas int `gorm:"column:max_replicas"`
}

// TableName returns the backing table name.
func (t *TenantServiceAutoscalerRules) TableName() string {
	return "tenant_services_autoscaler_rules"
}
// TenantServiceAutoscalerRuleMetrics is one metric attached to an autoscaler rule.
// NOTE(review): field names use plural "Metrics*" while the columns are singular
// ("metric_type", "metric_name") — inconsistent naming, but functional as tagged.
type TenantServiceAutoscalerRuleMetrics struct {
	Model
	RuleID           string `gorm:"column:rule_id;size:32;not null"`
	MetricsType      string `gorm:"column:metric_type;not null"`
	MetricsName      string `gorm:"column:metric_name;not null"`
	MetricTargetType string `gorm:"column:metric_target_type;not null"`
	MetricTargetValue int `gorm:"column:metric_target_value;not null"`
}

// TableName returns the backing table name.
func (t *TenantServiceAutoscalerRuleMetrics) TableName() string {
	return "tenant_services_autoscaler_rule_metrics"
}
// TenantServiceScalingRecords is one recorded scaling event for a service.
type TenantServiceScalingRecords struct {
	Model
	ServiceID string `gorm:"column:service_id" json:"-"`
	RuleID    string `gorm:"column:rule_id" json:"rule_id"`
	// NOTE(review): serialized as json "record_id" although the column is
	// event_name — presumably the event name doubles as the record id; confirm
	// with API consumers before changing either side.
	EventName   string    `gorm:"column:event_name;not null" json:"record_id"`
	RecordType  string    `gorm:"column:record_type" json:"record_type"`
	Reason      string    `gorm:"column:reason" json:"reason"`
	Count       int32     `gorm:"column:count" json:"count"`
	Description string    `gorm:"column:description;size:1023" json:"description"`
	Operator    string    `gorm:"column:operator" json:"operator"`
	LastTime    time.Time `gorm:"column:last_time" json:"last_time"`
}

// TableName returns the backing table name.
func (t *TenantServiceScalingRecords) TableName() string {
	return "tenant_services_scaling_records"
}

View File

@ -95,6 +95,16 @@ func (t *PluginDaoImpl) GetPluginsByTenantID(tenantID string) ([]*model.TenantPl
return plugins, nil
}
// ListByTenantID returns every plugin that belongs to the given tenant.
func (t *PluginDaoImpl) ListByTenantID(tenantID string) ([]*model.TenantPlugin, error) {
	var result []*model.TenantPlugin
	err := t.DB.Where("tenant_id=?", tenantID).Find(&result).Error
	if err != nil {
		return nil, err
	}
	return result, nil
}
//PluginDefaultENVDaoImpl PluginDefaultENVDaoImpl
type PluginDefaultENVDaoImpl struct {
DB *gorm.DB

View File

@ -25,12 +25,12 @@ import (
"strconv"
"time"
"github.com/goodrain/rainbond/db/dao"
"github.com/Sirupsen/logrus"
"github.com/jinzhu/gorm"
"github.com/goodrain/rainbond/db/dao"
"github.com/goodrain/rainbond/db/errors"
"github.com/goodrain/rainbond/db/model"
"github.com/jinzhu/gorm"
)
//TenantDaoImpl 租户信息管理
@ -91,7 +91,7 @@ func (t *TenantDaoImpl) GetTenantIDByName(name string) (*model.Tenants, error) {
func (t *TenantDaoImpl) GetALLTenants(query string) ([]*model.Tenants, error) {
var tenants []*model.Tenants
if query != "" {
if err := t.DB.Where("name like '%?%'", query).Find(&tenants).Error; err != nil {
if err := t.DB.Where("name like ?", "%"+query+"%").Find(&tenants).Error; err != nil {
return nil, err
}
} else {
@ -149,9 +149,8 @@ func (t *TenantDaoImpl) GetTenantLimitsByNames(names []string) (limit map[string
return
}
//GetALLTenants GetALLTenants
// GetPagedTenants -
func (t *TenantDaoImpl) GetPagedTenants(offset, len int) ([]*model.Tenants, error) {
var tenants []*model.Tenants
if err := t.DB.Find(&tenants).Group("").Error; err != nil {
return nil, err
@ -159,6 +158,15 @@ func (t *TenantDaoImpl) GetPagedTenants(offset, len int) ([]*model.Tenants, erro
return tenants, nil
}
// DelByTenantID deletes the tenant row whose uuid matches tenantID.
func (t *TenantDaoImpl) DelByTenantID(tenantID string) error {
	return t.DB.Where("uuid=?", tenantID).Delete(&model.Tenants{}).Error
}
//TenantServicesDaoImpl 租户应用dao
type TenantServicesDaoImpl struct {
DB *gorm.DB
@ -176,6 +184,16 @@ func (t *TenantServicesDaoImpl) GetAllServicesID() ([]*model.TenantServices, err
return services, nil
}
// ListServicesByTenantID returns all services that belong to the given tenant.
func (t *TenantServicesDaoImpl) ListServicesByTenantID(tenantID string) ([]*model.TenantServices, error) {
	var list []*model.TenantServices
	err := t.DB.Where("tenant_id=?", tenantID).Find(&list).Error
	if err != nil {
		return nil, err
	}
	return list, nil
}
//UpdateDeployVersion update service current deploy version
func (t *TenantServicesDaoImpl) UpdateDeployVersion(serviceID, deployversion string) error {
if err := t.DB.Exec("update tenant_services set deploy_version=? where service_id=?", deployversion, serviceID).Error; err != nil {
@ -216,7 +234,7 @@ func (t *TenantServicesDaoImpl) GetServiceByID(serviceID string) (*model.TenantS
return &service, nil
}
//GetServiceByID 获取服务通过服务别名
//GetServiceByServiceAlias 获取服务通过服务别名
func (t *TenantServicesDaoImpl) GetServiceByServiceAlias(serviceAlias string) (*model.TenantServices, error) {
var service model.TenantServices
if err := t.DB.Where("service_alias=?", serviceAlias).Find(&service).Error; err != nil {
@ -441,7 +459,7 @@ func (t *TenantServicesDaoImpl) DeleteServiceByServiceID(serviceID string) error
return nil
}
// ListThirdPartyService lists all third party services
// ListThirdPartyServices lists all third party services
func (t *TenantServicesDaoImpl) ListThirdPartyServices() ([]*model.TenantServices, error) {
var res []*model.TenantServices
if err := t.DB.Where("kind=?", model.ServiceKindThirdParty.String()).Find(&res).Error; err != nil {
@ -476,6 +494,7 @@ func (t *TenantServicesDeleteImpl) UpdateModel(mo model.Interface) error {
return nil
}
// GetTenantServicesDeleteByCreateTime -
func (t *TenantServicesDeleteImpl) GetTenantServicesDeleteByCreateTime(createTime time.Time) ([]*model.TenantServicesDelete, error) {
var ServiceDel []*model.TenantServicesDelete
if err := t.DB.Where("create_time < ?", createTime).Find(&ServiceDel).Error; err != nil {
@ -487,6 +506,7 @@ func (t *TenantServicesDeleteImpl) GetTenantServicesDeleteByCreateTime(createTim
return ServiceDel, nil
}
// DeleteTenantServicesDelete -
func (t *TenantServicesDeleteImpl) DeleteTenantServicesDelete(record *model.TenantServicesDelete) error {
if err := t.DB.Delete(record).Error; err != nil {
return err
@ -584,7 +604,7 @@ func (t *TenantServicesPortDaoImpl) GetPort(serviceID string, port int) (*model.
return &oldPort, nil
}
// GetEnablePort returns opened ports.
// GetOpenedPorts returns opened ports.
func (t *TenantServicesPortDaoImpl) GetOpenedPorts(serviceID string) ([]*model.TenantServicesPort, error) {
var ports []*model.TenantServicesPort
if err := t.DB.Where("service_id = ? and (is_inner_service=1 or is_outer_service=1)", serviceID).
@ -1411,7 +1431,7 @@ func (t *ServiceLabelDaoImpl) GetTenantServiceTypeLabel(serviceID string) (*mode
return &label, nil
}
//DELTenantServiceLabelsByLabelvaluesAndServiceID DELTenantServiceLabelsByLabelvaluesAndServiceID
//DelTenantServiceLabelsByLabelValuesAndServiceID DELTenantServiceLabelsByLabelvaluesAndServiceID
func (t *ServiceLabelDaoImpl) DelTenantServiceLabelsByLabelValuesAndServiceID(serviceID string) error {
var label model.TenantServiceLable
if err := t.DB.Where("service_id=? and label_value=?", serviceID, model.LabelKeyNodeSelector).Delete(&label).Error; err != nil {
@ -1420,7 +1440,7 @@ func (t *ServiceLabelDaoImpl) DelTenantServiceLabelsByLabelValuesAndServiceID(se
return nil
}
//DelTenantServiceLabels deletes labels
//DelTenantServiceLabelsByServiceIDKeyValue deletes labels
func (t *ServiceLabelDaoImpl) DelTenantServiceLabelsByServiceIDKeyValue(serviceID string, labelKey string,
labelValue string) error {
var label model.TenantServiceLable
@ -1439,3 +1459,183 @@ func (t *ServiceLabelDaoImpl) DelTenantServiceLabelsByServiceIDKey(serviceID str
}
return nil
}
// TenantServceAutoscalerRulesDaoImpl is the gorm-backed implementation of
// TenantServceAutoscalerRulesDao.
type TenantServceAutoscalerRulesDaoImpl struct {
	DB *gorm.DB
}

// AddModel inserts a new autoscaler rule; returns ErrRecordAlreadyExist when a
// row with the same rule_id already exists.
// NOTE(review): any Find error other than "record not found" also lands in the
// ErrRecordAlreadyExist branch, masking the real DB error — confirm intended.
func (t *TenantServceAutoscalerRulesDaoImpl) AddModel(mo model.Interface) error {
	rule := mo.(*model.TenantServiceAutoscalerRules)
	var old model.TenantServiceAutoscalerRules
	if ok := t.DB.Where("rule_id = ?", rule.RuleID).Find(&old).RecordNotFound(); ok {
		if err := t.DB.Create(rule).Error; err != nil {
			return err
		}
	} else {
		return errors.ErrRecordAlreadyExist
	}
	return nil
}
// UpdateModel persists changes to an existing autoscaler rule.
func (t *TenantServceAutoscalerRulesDaoImpl) UpdateModel(mo model.Interface) error {
	rule := mo.(*model.TenantServiceAutoscalerRules)
	return t.DB.Save(rule).Error
}
// GetByRuleID fetches the single autoscaler rule identified by ruleID.
func (t *TenantServceAutoscalerRulesDaoImpl) GetByRuleID(ruleID string) (*model.TenantServiceAutoscalerRules, error) {
	rule := &model.TenantServiceAutoscalerRules{}
	if err := t.DB.Where("rule_id=?", ruleID).Find(rule).Error; err != nil {
		return nil, err
	}
	return rule, nil
}
// ListByServiceID returns every autoscaler rule bound to the given service.
func (t *TenantServceAutoscalerRulesDaoImpl) ListByServiceID(serviceID string) ([]*model.TenantServiceAutoscalerRules, error) {
	var out []*model.TenantServiceAutoscalerRules
	err := t.DB.Where("service_id=?", serviceID).Find(&out).Error
	if err != nil {
		return nil, err
	}
	return out, nil
}
// ListEnableOnesByServiceID returns only the enabled rules for the service.
func (t *TenantServceAutoscalerRulesDaoImpl) ListEnableOnesByServiceID(serviceID string) ([]*model.TenantServiceAutoscalerRules, error) {
	var enabled []*model.TenantServiceAutoscalerRules
	err := t.DB.Where("service_id=? and enable=?", serviceID, true).Find(&enabled).Error
	if err != nil {
		return nil, err
	}
	return enabled, nil
}
// TenantServceAutoscalerRuleMetricsDaoImpl is the gorm-backed implementation of
// dao.TenantServceAutoscalerRuleMetricsDao; it persists the metrics attached
// to an autoscaler rule.
type TenantServceAutoscalerRuleMetricsDaoImpl struct {
	DB *gorm.DB
}

// AddModel stores a new rule metric. It returns errors.ErrRecordAlreadyExist
// when a metric with the same (rule_id, metric_type, metric_name) exists.
func (t *TenantServceAutoscalerRuleMetricsDaoImpl) AddModel(mo model.Interface) error {
	metric := mo.(*model.TenantServiceAutoscalerRuleMetrics)
	var old model.TenantServiceAutoscalerRuleMetrics
	if ok := t.DB.Where("rule_id=? and metric_type=? and metric_name=?", metric.RuleID, metric.MetricsType, metric.MetricsName).Find(&old).RecordNotFound(); !ok {
		return errors.ErrRecordAlreadyExist
	}
	return t.DB.Create(metric).Error
}

// UpdateModel saves the given rule metric.
func (t *TenantServceAutoscalerRuleMetricsDaoImpl) UpdateModel(mo model.Interface) error {
	metric := mo.(*model.TenantServiceAutoscalerRuleMetrics)
	return t.DB.Save(metric).Error
}

// UpdateOrCreate creates the metric when it does not exist yet; otherwise it
// refreshes the target type and value of the stored record.
func (t *TenantServceAutoscalerRuleMetricsDaoImpl) UpdateOrCreate(metric *model.TenantServiceAutoscalerRuleMetrics) error {
	var old model.TenantServiceAutoscalerRuleMetrics
	if ok := t.DB.Where("rule_id=? and metric_type=? and metric_name=?", metric.RuleID, metric.MetricsType, metric.MetricsName).Find(&old).RecordNotFound(); ok {
		return t.DB.Create(metric).Error
	}
	old.MetricTargetType = metric.MetricTargetType
	old.MetricTargetValue = metric.MetricTargetValue
	return t.DB.Save(&old).Error
}

// ListByRuleID returns all metrics that belong to the given autoscaler rule.
func (t *TenantServceAutoscalerRuleMetricsDaoImpl) ListByRuleID(ruleID string) ([]*model.TenantServiceAutoscalerRuleMetrics, error) {
	var metrics []*model.TenantServiceAutoscalerRuleMetrics
	if err := t.DB.Where("rule_id=?", ruleID).Find(&metrics).Error; err != nil {
		return nil, err
	}
	return metrics, nil
}

// DeleteByRuleID deletes every metric that belongs to the given autoscaler
// rule. (Parameter name fixed from the original typo "ruldID".)
func (t *TenantServceAutoscalerRuleMetricsDaoImpl) DeleteByRuleID(ruleID string) error {
	return t.DB.Where("rule_id=?", ruleID).Delete(&model.TenantServiceAutoscalerRuleMetrics{}).Error
}
// TenantServiceScalingRecordsDaoImpl is the gorm-backed implementation of
// dao.TenantServiceScalingRecordsDao; it persists autoscaling event records.
type TenantServiceScalingRecordsDaoImpl struct {
	DB *gorm.DB
}

// AddModel stores a new scaling record. It returns
// errors.ErrRecordAlreadyExist when a record with the same event_name exists.
func (t *TenantServiceScalingRecordsDaoImpl) AddModel(mo model.Interface) error {
	record := mo.(*model.TenantServiceScalingRecords)
	var old model.TenantServiceScalingRecords
	if ok := t.DB.Where("event_name=?", record.EventName).Find(&old).RecordNotFound(); !ok {
		return errors.ErrRecordAlreadyExist
	}
	return t.DB.Create(record).Error
}

// UpdateModel saves the given scaling record.
func (t *TenantServiceScalingRecordsDaoImpl) UpdateModel(mo model.Interface) error {
	record := mo.(*model.TenantServiceScalingRecords)
	return t.DB.Save(record).Error
}

// UpdateOrCreate creates the scaling record keyed by event_name or, when it
// already exists, refreshes its occurrence count and last-seen time.
// The parameter was renamed from `new`, which shadowed the predeclared
// identifier.
func (t *TenantServiceScalingRecordsDaoImpl) UpdateOrCreate(record *model.TenantServiceScalingRecords) error {
	var old model.TenantServiceScalingRecords
	if ok := t.DB.Where("event_name=?", record.EventName).Find(&old).RecordNotFound(); ok {
		return t.DB.Create(record).Error
	}
	old.Count = record.Count
	old.LastTime = record.LastTime
	return t.DB.Save(&old).Error
}

// ListByServiceID returns one page of scaling records for the service,
// newest (by last_time) first.
func (t *TenantServiceScalingRecordsDaoImpl) ListByServiceID(serviceID string, offset, limit int) ([]*model.TenantServiceScalingRecords, error) {
	var records []*model.TenantServiceScalingRecords
	if err := t.DB.Where("service_id=?", serviceID).Offset(offset).Limit(limit).Order("last_time desc").Find(&records).Error; err != nil {
		return nil, err
	}
	return records, nil
}

// CountByServiceID returns the total number of scaling records for the service.
func (t *TenantServiceScalingRecordsDaoImpl) CountByServiceID(serviceID string) (int, error) {
	record := model.TenantServiceScalingRecords{}
	var count int
	if err := t.DB.Table(record.TableName()).Where("service_id=?", serviceID).Count(&count).Error; err != nil {
		return 0, err
	}
	return count, nil
}

View File

@ -19,9 +19,10 @@
package mysql
import (
"github.com/jinzhu/gorm"
"github.com/goodrain/rainbond/db/dao"
mysqldao "github.com/goodrain/rainbond/db/mysql/dao"
"github.com/jinzhu/gorm"
)
//LicenseDao LicenseDao
@ -409,6 +410,13 @@ func (m *Manager) AppBackupDao() dao.AppBackupDao {
}
}
// AppBackupDaoTransactions returns a dao.AppBackupDao bound to the given
// transaction instead of the manager's default connection.
func (m *Manager) AppBackupDaoTransactions(db *gorm.DB) dao.AppBackupDao {
	return &mysqldao.AppBackupDaoImpl{DB: db}
}
//ServiceSourceDao service source db impl
func (m *Manager) ServiceSourceDao() dao.ServiceSourceDao {
return &mysqldao.ServiceSourceImpl{
@ -513,3 +521,45 @@ func (m *Manager) GwRuleConfigDaoTransactions(db *gorm.DB) dao.GwRuleConfigDao {
DB: db,
}
}
// TenantServceAutoscalerRulesDao returns a dao.TenantServceAutoscalerRulesDao
// bound to the manager's default database connection.
func (m *Manager) TenantServceAutoscalerRulesDao() dao.TenantServceAutoscalerRulesDao {
	return &mysqldao.TenantServceAutoscalerRulesDaoImpl{DB: m.db}
}

// TenantServceAutoscalerRulesDaoTransactions returns a
// dao.TenantServceAutoscalerRulesDao bound to the given transaction.
func (m *Manager) TenantServceAutoscalerRulesDaoTransactions(db *gorm.DB) dao.TenantServceAutoscalerRulesDao {
	return &mysqldao.TenantServceAutoscalerRulesDaoImpl{DB: db}
}

// TenantServceAutoscalerRuleMetricsDao returns a
// dao.TenantServceAutoscalerRuleMetricsDao bound to the manager's default
// database connection.
func (m *Manager) TenantServceAutoscalerRuleMetricsDao() dao.TenantServceAutoscalerRuleMetricsDao {
	return &mysqldao.TenantServceAutoscalerRuleMetricsDaoImpl{DB: m.db}
}

// TenantServceAutoscalerRuleMetricsDaoTransactions returns a
// dao.TenantServceAutoscalerRuleMetricsDao bound to the given transaction.
func (m *Manager) TenantServceAutoscalerRuleMetricsDaoTransactions(db *gorm.DB) dao.TenantServceAutoscalerRuleMetricsDao {
	return &mysqldao.TenantServceAutoscalerRuleMetricsDaoImpl{DB: db}
}

// TenantServiceScalingRecordsDao returns a dao.TenantServiceScalingRecordsDao
// bound to the manager's default database connection.
func (m *Manager) TenantServiceScalingRecordsDao() dao.TenantServiceScalingRecordsDao {
	return &mysqldao.TenantServiceScalingRecordsDaoImpl{DB: m.db}
}

// TenantServiceScalingRecordsDaoTransactions returns a
// dao.TenantServiceScalingRecordsDao bound to the given transaction.
func (m *Manager) TenantServiceScalingRecordsDaoTransactions(db *gorm.DB) dao.TenantServiceScalingRecordsDao {
	return &mysqldao.TenantServiceScalingRecordsDaoImpl{DB: db}
}

View File

@ -77,6 +77,16 @@ func (m *Manager) Begin() *gorm.DB {
return m.db.Begin()
}
// EnsureEndTransactionFunc returns a function intended to be deferred with a
// transaction: when a panic is being unwound it logs the panic value and rolls
// the transaction back, so a panicking request never leaks an open transaction.
func (m *Manager) EnsureEndTransactionFunc() func(tx *gorm.DB) {
	return func(tx *gorm.DB) {
		// recover() must run directly in the deferred function to observe
		// an in-flight panic.
		r := recover()
		if r == nil {
			return
		}
		logrus.Errorf("Unexpected panic occurred, rollback transaction: %v", r)
		tx.Rollback()
	}
}
//Print Print
func (m *Manager) Print(v ...interface{}) {
logrus.Info(v...)
@ -122,6 +132,10 @@ func (m *Manager) RegisterTableModel() {
m.models = append(m.models, &model.Endpoint{})
m.models = append(m.models, &model.ThirdPartySvcDiscoveryCfg{})
m.models = append(m.models, &model.GwRuleConfig{})
// pod autoscaler
m.models = append(m.models, &model.TenantServiceAutoscalerRules{})
m.models = append(m.models, &model.TenantServiceAutoscalerRuleMetrics{})
m.models = append(m.models, &model.TenantServiceScalingRecords{})
}
//CheckTable check and create tables

View File

@ -0,0 +1,95 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package db
import (
"context"
"fmt"
dbconfig "github.com/goodrain/rainbond/db/config"
"github.com/goodrain/rainbond/db/model"
"github.com/testcontainers/testcontainers-go"
"testing"
"time"
)
// TestManager_TenantServiceScalingRecordsDaoImpl_UpdateOrCreate spins up a
// throw-away mariadb container, points a db manager at it and verifies that
// UpdateOrCreate can insert a scaling record.
func TestManager_TenantServiceScalingRecordsDaoImpl_UpdateOrCreate(t *testing.T) {
	dbname := "region"
	rootpw := "rainbond"

	ctx := context.Background()
	req := testcontainers.ContainerRequest{
		Image:        "mariadb",
		ExposedPorts: []string{"3306/tcp"},
		Env: map[string]string{
			"MYSQL_ROOT_PASSWORD": rootpw,
			"MYSQL_DATABASE":      dbname,
		},
		Cmd: "--character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci",
	}
	mariadb, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer mariadb.Terminate(ctx)

	// Without a valid host and port the connection string below is useless,
	// so these failures must abort the test. (Was t.Error, which let the
	// test continue with zero values and fail later with a misleading
	// connection error.)
	host, err := mariadb.Host(ctx)
	if err != nil {
		t.Fatal(err)
	}
	port, err := mariadb.MappedPort(ctx, "3306")
	if err != nil {
		t.Fatal(err)
	}

	connInfo := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", "root",
		rootpw, host, port.Int(), dbname)
	// The database inside the container may need a while before it accepts
	// connections; retry a few times before giving up.
	tryTimes := 3
	for {
		if err := CreateManager(dbconfig.Config{
			DBType:              "mysql",
			MysqlConnectionInfo: connInfo,
		}); err != nil {
			if tryTimes == 0 {
				t.Fatalf("Connect info: %s; error creating db manager: %v", connInfo, err)
			} else {
				tryTimes = tryTimes - 1
				time.Sleep(10 * time.Second)
				continue
			}
		}
		break
	}

	record := &model.TenantServiceScalingRecords{
		ServiceID:   "45197f4936cf45efa2ac4831ce42025a",
		RuleID:      "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
		EventName:   "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx.15d6ede119c8af35",
		RecordType:  "hpa",
		Reason:      "FailedGetResourceMetric",
		Description: "unable to get metrics for resource memory: no metrics returned from resource metrics API",
		Count:       2,
		LastTime:    time.Now(),
	}
	if err := GetManager().TenantServiceScalingRecordsDao().UpdateOrCreate(record); err != nil {
		t.Fatal(err)
	}
}

View File

@ -129,7 +129,6 @@ func (k *KeepAlive) reg() error {
k.gRPCResolver = &etcdnaming.GRPCResolver{Client: k.etcdClient}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
resp, err := k.etcdClient.Grant(ctx, k.TTL+3)
if err != nil {
return err

View File

@ -20,11 +20,12 @@ package entry
import (
"errors"
"net"
"time"
"github.com/goodrain/rainbond/eventlog/conf"
"github.com/goodrain/rainbond/eventlog/store"
"github.com/goodrain/rainbond/eventlog/util"
"net"
"time"
"golang.org/x/net/context"
@ -122,7 +123,6 @@ func (s *DockerLogServer) OnConnect(c *util.Conn) bool {
// OnMessage is called when the connection receives a packet,
// If the return value of false is closed
func (s *DockerLogServer) OnMessage(p util.Packet) bool {
if len(p.Serialize()) > 0 {
select {
case s.messageChan <- p.Serialize():
@ -131,11 +131,9 @@ func (s *DockerLogServer) OnMessage(p util.Packet) bool {
//TODO: return false and receive exist
return true
}
} else {
logrus.Error("receive a null message")
}
return true
}

View File

@ -26,6 +26,8 @@ import (
"strings"
"time"
"github.com/goodrain/rainbond/util"
"github.com/go-chi/chi"
"github.com/Sirupsen/logrus"
@ -474,6 +476,9 @@ func (s *SocketServer) listen() {
r.Get("/services/{serviceID}/pubsub", s.pubsub)
//monitor setting
s.prometheus(r)
//pprof debug
util.ProfilerSetup(r)
if s.conf.SSL {
go func() {
addr := fmt.Sprintf("%s:%d", s.conf.BindIP, s.conf.SSLBindPort)

View File

@ -184,7 +184,6 @@ func (c *Conn) readLoop() {
return
default:
}
//c.conn.SetReadDeadline(time.Now().Add(time.Second * 3))
p, err := c.pro.ReadPacket()
if err == io.EOF {
return
@ -195,21 +194,18 @@ func (c *Conn) readLoop() {
if err == errClosed {
return
}
if err == io.ErrNoProgress {
return
}
if err != nil {
if strings.HasSuffix(err.Error(), "use of closed network connection") {
logrus.Error("use of closed network connection")
return
}
logrus.Error("read package error:", err.Error())
return
}
//如果收到0字节等待100ms
//经测试,以下情况返回空:
// 1. 一定时间未收到数据。
// 2. 网络缓冲区已满。次有待研究原因。
// 网上有说go返回0说明对方已关闭连接需要关闭连接
if p.IsNull() {
// time.Sleep(time.Millisecond * 100)
// continue
return
}
if p.IsPing() {
@ -219,7 +215,7 @@ func (c *Conn) readLoop() {
continue
}
if ok := c.srv.callback.OnMessage(p); !ok {
return
continue
}
if ok := c.timer.Reset(timeOut); !ok {
c.timer = time.NewTimer(timeOut)

View File

@ -23,7 +23,9 @@ import (
"bytes"
"encoding/binary"
"errors"
"io"
"net"
"time"
)
type Packet interface {
@ -87,6 +89,8 @@ func (m *MessageProtocol) isPing(s string) bool {
return s == "0x00ping"
}
const maxConsecutiveEmptyReads = 100
//Decode 解码数据流
func (m *MessageProtocol) Decode() (string, error) {
// 读取消息的长度
@ -104,6 +108,7 @@ func (m *MessageProtocol) Decode() (string, error) {
return "", errClosed
}
if int32(m.reader.Buffered()) < length+4 {
var retry = 0
for m.cacheSize < int64(length+4) {
//read size must <= length+4
readSize := int64(length+4) - m.cacheSize
@ -115,11 +120,22 @@ func (m *MessageProtocol) Decode() (string, error) {
if err != nil {
return "", err
}
if size == 0 {
return "", errClosed
//Two consecutive reads 0 bytes, return io.ErrNoProgress
//Read() will read up to len(p) into p, when possible.
//After a Read() call, n may be less then len(p).
//Upon error, Read() may still return n bytes in buffer p. For instance, reading from a TCP socket that is abruptly closed. Depending on your use, you may choose to keep the bytes in p or retry.
//When a Read() exhausts available data, a reader may return a non-zero n and err=io.EOF. However, depending on implementation, a reader may choose to return a non-zero n and err = nil at the end of stream. In that case, any subsequent reads must return n=0, err=io.EOF.
//Lastly, a call to Read() that returns n=0 and err=nil does not mean EOF as the next call to Read() may return more data.
if size <= 0 {
retry++
if retry > maxConsecutiveEmptyReads {
return "", io.ErrNoProgress
}
time.Sleep(time.Millisecond * 10)
} else {
m.cacheSize += int64(size)
m.cache.Write(buffer)
}
m.cacheSize += int64(size)
m.cache.Write(buffer)
}
result := m.cache.Bytes()[4:]
m.cache.Reset()
@ -134,7 +150,7 @@ func (m *MessageProtocol) Decode() (string, error) {
return "", err
}
if size == 0 {
return "", errClosed
return "", io.ErrNoProgress
}
return string(pack[4:]), nil
}

View File

@ -22,6 +22,7 @@ import (
"context"
"fmt"
"net"
"sync"
"time"
"github.com/Sirupsen/logrus"
@ -43,6 +44,7 @@ type IPManager interface {
type ipManager struct {
IPPool *util.IPPool
ipLease map[string]clientv3.LeaseID
lock sync.Mutex
etcdCli *clientv3.Client
config option.Config
}
@ -94,26 +96,29 @@ func (i *ipManager) syncIP() {
func (i *ipManager) updateIP(ips ...net.IP) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
lease := clientv3.NewLease(i.etcdCli)
i.lock.Lock()
defer i.lock.Unlock()
leaseClient := clientv3.NewLease(i.etcdCli)
for in := range ips {
ip := ips[in]
if id, ok := i.ipLease[ip.String()]; ok {
if _, err := lease.KeepAliveOnce(ctx, id); err == nil {
if _, err := leaseClient.KeepAliveOnce(ctx, id); err == nil {
continue
} else {
logrus.Warningf("keep alive ip key failure %s", err.Error())
}
}
res, err := lease.Grant(ctx, 10)
res, err := leaseClient.Grant(ctx, 10)
if err != nil {
logrus.Errorf("put gateway ip to etcd failure %s", err.Error())
return err
continue
}
_, err = i.etcdCli.Put(ctx, fmt.Sprintf("/rainbond/gateway/ips/%s", ip.String()), ip.String(), clientv3.WithLease(res.ID))
if err != nil {
logrus.Errorf("put gateway ip to etcd failure %s", err.Error())
continue
}
logrus.Infof("gateway init ip %s", ip.String())
logrus.Infof("gateway init add ip %s", ip.String())
i.ipLease[ip.String()] = res.ID
}
return nil
@ -122,6 +127,8 @@ func (i *ipManager) updateIP(ips ...net.IP) error {
func (i *ipManager) deleteIP(ips ...net.IP) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
i.lock.Lock()
defer i.lock.Unlock()
for _, ip := range ips {
_, err := i.etcdCli.Delete(ctx, fmt.Sprintf("/rainbond/gateway/ips/%s", ip.String()))
if err != nil {

View File

@ -121,7 +121,8 @@ func (n *NginxConfigFileTemplete) WriteServerAndUpstream(first bool, c option.Co
}
n.writeLocks[tenant].Lock()
defer n.writeLocks[tenant].Unlock()
serverConfigFile := path.Join(n.configFileDirPath, configType, tenant, "servers.conf")
filename := fmt.Sprintf("%s_servers.conf", tenant)
serverConfigFile := path.Join(n.configFileDirPath, configType, tenant, filename)
upstreamConfigFile := path.Join(n.configFileDirPath, "stream", tenant, "upstreams.conf")
serverBody, err := n.serverTmpl.Write(&NginxServerContext{Server: server, Set: c})
if err != nil {

View File

@ -59,23 +59,27 @@ func getClusterInfo(c *cli.Context) error {
fmt.Println("Exec Command: journalctl -fu rbd-api")
os.Exit(1)
}
healthCPUFree := fmt.Sprintf("%.2f", float32(clusterInfo.HealthCapCPU)-clusterInfo.HealthReqCPU)
unhealthCPUFree := fmt.Sprintf("%.2f", float32(clusterInfo.UnhealthCapCPU)-clusterInfo.UnhealthReqCPU)
healthMemFree := fmt.Sprintf("%d", clusterInfo.HealthCapMem-clusterInfo.HealthReqMem)
unhealthMemFree := fmt.Sprintf("%d", clusterInfo.UnhealthCapMem-clusterInfo.UnhealthReqMem)
table := uitable.New()
table.AddRow("", "Used/Total", "Use of")
table.AddRow("CPU", fmt.Sprintf("%2.f/%d", clusterInfo.ReqCPU, clusterInfo.CapCPU),
table.AddRow("", "Used/Total", "Use of", "Health free", "Unhealth free")
table.AddRow("CPU(Core)", fmt.Sprintf("%.2f/%d", clusterInfo.ReqCPU, clusterInfo.CapCPU),
fmt.Sprintf("%d", func() int {
if clusterInfo.CapCPU == 0 {
return 0
}
return int(clusterInfo.ReqCPU * 100 / float32(clusterInfo.CapCPU))
}())+"%")
table.AddRow("Memory", fmt.Sprintf("%d/%d", clusterInfo.ReqMem, clusterInfo.CapMem),
}())+"%", "\033[0;32;32m"+healthCPUFree+"\033[0m \t\t", unhealthCPUFree)
table.AddRow("Memory(Mb)", fmt.Sprintf("%d/%d", clusterInfo.ReqMem, clusterInfo.CapMem),
fmt.Sprintf("%d", func() int {
if clusterInfo.CapMem == 0 {
return 0
}
return int(float32(clusterInfo.ReqMem*100) / float32(clusterInfo.CapMem))
}())+"%")
table.AddRow("DistributedDisk", fmt.Sprintf("%dGb/%dGb", clusterInfo.ReqDisk/1024/1024/1024, clusterInfo.CapDisk/1024/1024/1024),
}())+"%", "\033[0;32;32m"+healthMemFree+" \033[0m \t\t", unhealthMemFree)
table.AddRow("DistributedDisk(Gb)", fmt.Sprintf("%d/%d", clusterInfo.ReqDisk/1024/1024/1024, clusterInfo.CapDisk/1024/1024/1024),
fmt.Sprintf("%.2f", func() float32 {
if clusterInfo.CapDisk == 0 {
return 0

View File

@ -30,10 +30,8 @@ import (
"github.com/goodrain/rainbond/util"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/grctl/clients"
"github.com/urfave/cli" //"github.com/goodrain/rainbond/grctl/clients"
)
@ -221,41 +219,6 @@ func NewCmdInit() cli.Command {
return c
}
//NewCmdInstallStatus install status
func NewCmdInstallStatus() cli.Command {
c := cli.Command{
Name: "install_status",
Flags: []cli.Flag{
cli.StringFlag{
Name: "taskID",
Usage: "install_k8s,空则自动寻找",
},
},
Usage: "获取task执行状态。grctl install_status",
Action: func(c *cli.Context) error {
taskID := c.String("taskID")
if taskID == "" {
tasks, err := clients.RegionClient.Tasks().List()
if err != nil {
logrus.Errorf("error get task list,details %s", err.Error())
return nil
}
for _, v := range tasks {
for _, vs := range v.Status {
if vs.Status == "start" || vs.Status == "create" {
//Status(v.ID)
return nil
}
}
}
} else {
//Status(taskID)
}
return nil
},
}
return c
}
func updateConfigFile(path string, config map[string]interface{}) error {
initConfig := make(map[string]interface{})
var file *os.File

View File

@ -1,270 +0,0 @@
// Copyright (C) 2014-2018 Goodrain Co., Ltd.
// RAINBOND, Application Management Platform
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"os"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/grctl/clients"
"github.com/urfave/cli"
)
//GetCommand builds the install/check sub-commands (compute, manage_base,
//manage and their per-step sub-commands). Each action delegates to Task with
//a fixed task name; the status flag is forwarded unchanged.
func GetCommand(status bool) []cli.Command {
	c := []cli.Command{
		{
			Name:  "compute",
			Usage: "安装计算节点 compute -h",
			Flags: []cli.Flag{
				cli.StringSliceFlag{
					Name:  "nodes",
					Usage: "hostID1 hostID2 ...,空表示全部",
				},
			},
			Action: func(c *cli.Context) error {
				return Task(c, "check_compute_services", status)
			},
			Subcommands: []cli.Command{
				{
					Name:  "storage_client",
					Usage: "step 1 storage_client",
					Action: func(c *cli.Context) error {
						return Task(c, "install_storage_client", status)
					},
				},
				{
					Name:  "kubelet",
					Usage: "need storage_client",
					Action: func(c *cli.Context) error {
						return Task(c, "install_kubelet", status)
					},
				},
				{
					Name:  "network_compute",
					Usage: "need storage_client,kubelet",
					Action: func(c *cli.Context) error {
						return Task(c, "install_network_compute", status)
					},
				},
			},
		},
		{
			Name:  "manage_base",
			Usage: "安装管理节点基础服务。 manage_base -h",
			Flags: []cli.Flag{
				cli.StringSliceFlag{
					Name:  "nodes",
					Usage: "hostID1 hostID2 ...,空表示全部",
				},
			},
			Action: func(c *cli.Context) error {
				return Task(c, "check_manage_base_services", status)
			},
			Subcommands: []cli.Command{
				{
					Name:  "docker",
					Usage: "step 1 安装docker",
					Action: func(c *cli.Context) error {
						return Task(c, "install_docker", status)
					},
				},
				{
					Name:  "db",
					Usage: "step 2 安装db",
					Action: func(c *cli.Context) error {
						return Task(c, "install_db", status)
					},
				},
				{
					Name:  "base_plugins",
					Usage: "step 3 基础插件",
					Action: func(c *cli.Context) error {
						return Task(c, "install_base_plugins", status)
					},
				},
				{
					Name:  "acp_plugins",
					Usage: "step 4 acp插件",
					Action: func(c *cli.Context) error {
						return Task(c, "install_acp_plugins", status)
					},
				},
			},
		},
		{
			Name:  "manage",
			Usage: "安装管理节点。 manage -h",
			Flags: []cli.Flag{
				cli.StringSliceFlag{
					Name:  "nodes",
					Usage: "hostID1 hostID2 ...,空表示全部",
				},
			},
			Subcommands: []cli.Command{
				{
					Name:  "storage",
					Usage: "step 1 安装存储",
					Action: func(c *cli.Context) error {
						return Task(c, "install_storage", status)
					},
				},
				{
					Name:  "k8s",
					Usage: "need storage",
					Action: func(c *cli.Context) error {
						return Task(c, "install_k8s", status)
					},
				},
				{
					Name:  "network",
					Usage: "need storage,k8s",
					Action: func(c *cli.Context) error {
						return Task(c, "install_network", status)
					},
				},
				{
					Name:  "plugins",
					Usage: "need storage,k8s,network",
					Action: func(c *cli.Context) error {
						return Task(c, "install_plugins", status)
					},
				},
			},
			Action: func(c *cli.Context) error {
				return Task(c, "check_manage_services", status)
			},
		},
	}
	return c
}
//NewCmdInstall returns the top-level `install` cli command whose
//sub-commands are the install tasks built by GetCommand (status=false, i.e.
//they execute the tasks rather than only reporting status).
func NewCmdInstall() cli.Command {
	c := cli.Command{
		Name:  "install",
		Usage: "安装命令相关子命令。grctl install -h",
		Flags: []cli.Flag{
			cli.StringSliceFlag{
				Name:  "nodes",
				Usage: "hostID1 hostID2 ...,空表示全部",
			},
		},
		Subcommands: GetCommand(false),
	}
	return c
}
//Status polls the execution status of the given task on the selected nodes
//every 3 seconds, printing progress to stdout. It exits the process on task
//failure, recurses into any follow-up tasks reported on completion, and gives
//up after three consecutive polling errors.
func Status(task string, nodes []string) {
	checkFail := 0
	lastState := ""
	// set marks the node IDs this invocation cares about.
	set := make(map[string]bool)
	for _, v := range nodes {
		set[v] = true
	}
	fmt.Printf("%s task is start\n", task)
	lastState = "Start"
	for checkFail < 3 {
		time.Sleep(3 * time.Second)
		taskE, err := clients.RegionClient.Tasks().Get(task)
		if err != nil {
			logrus.Warnf("error get task %s ,details %s,retry", task, err.String())
			checkFail++
			continue
		}
		//status,error:=clients.NodeClient.Tasks().Status(task)
		status, err := clients.RegionClient.Tasks().GetTaskStatus(task)
		if err != nil || status == nil {
			logrus.Warnf("error get task %s status,details %s,retry", task, err.String())
			checkFail++
			continue
		}
		for k, v := range status {
			// Ignore nodes outside the requested set.
			if !set[k] {
				fmt.Print("..")
				continue
			}
			// On failure, dump the task output for the selected nodes and exit.
			if strings.Contains(v.Status, "error") || strings.Contains(v.CompleStatus, "Failure") || strings.Contains(v.CompleStatus, "Unknow") {
				checkFail++
				fmt.Printf("error executing task %s \n", task)
				for _, v := range taskE.OutPut {
					if set[v.NodeID] {
						fmt.Printf("on %s :\n %s", v.NodeID, v.Body)
					}
				}
				os.Exit(1)
			}
			// Print the state only when it changes; otherwise show progress dots.
			if lastState != v.Status {
				fmt.Printf("task %s is %s\n", task, v.Status)
			} else {
				fmt.Print("..")
			}
			lastState = v.Status
			if v.Status == "complete" || v.CompleStatus == "Success" {
				fmt.Printf("task %s is %s %s\n", task, v.Status, v.CompleStatus)
				lastState = v.Status
				// Collect follow-up tasks reported by the finished task and
				// recursively track each of them.
				taskFinished := taskE
				var nextTasks []string
				for _, v := range taskFinished.OutPut {
					if !set[v.NodeID] {
						continue
					}
					for _, sv := range v.Status {
						if sv.NextTask == nil || len(sv.NextTask) == 0 {
							continue
						} else {
							for _, v := range sv.NextTask {
								nextTasks = append(nextTasks, v)
							}
						}
					}
				}
				if len(nextTasks) > 0 {
					fmt.Printf("next will install %v \n", nextTasks)
					for _, v := range nextTasks {
						Status(v, nodes)
					}
				}
				return
			}
		}
		// A fully processed poll resets the consecutive-failure counter.
		checkFail = 0
	}
}
//Task executes the named install task on the nodes given via the --nodes
//flag and then follows its status until it finishes. It returns an error when
//no nodes were selected or when triggering the task fails.
func Task(c *cli.Context, task string, status bool) error {
	nodes := c.StringSlice("nodes")
	if len(nodes) == 0 {
		return fmt.Errorf("nodes can not be empty")
	}
	if err := clients.RegionClient.Tasks().Exec(task, nodes); err != nil {
		logrus.Errorf("error exec task:%s,details %s", task, err.Error())
		return err
	}
	Status(task, nodes)
	return nil
}

View File

@ -25,6 +25,7 @@ import (
"os"
"strconv"
"strings"
"time"
"github.com/goodrain/rainbond/util/ansible"
@ -41,9 +42,13 @@ import (
)
func handleErr(err *util.APIHandleError) {
if err != nil && err.Err != nil {
fmt.Printf(err.String())
os.Exit(1)
if err != nil {
if err.Err != nil {
fmt.Printf(err.String())
os.Exit(1)
} else {
fmt.Println("API return %d", err.Code)
}
}
}
func showError(m string) {
@ -155,11 +160,11 @@ func handleConditionResult(serviceTable *termtables.Table, conditions []client.N
continue
}
var formatReady string
if v.Status == client.ConditionFalse {
if v.Status == client.ConditionFalse || v.Status == client.ConditionUnknown {
if v.Type == client.OutOfDisk || v.Type == client.MemoryPressure || v.Type == client.DiskPressure || v.Type == client.InstallNotReady {
formatReady = "\033[0;32;32m false \033[0m"
} else {
formatReady = "\033[0;31;31m false \033[0m"
formatReady = fmt.Sprintf("\033[0;31;31m %s \033[0m", v.Status)
}
} else {
if v.Type == client.OutOfDisk || v.Type == client.MemoryPressure || v.Type == client.DiskPressure || v.Type == client.InstallNotReady {
@ -168,7 +173,11 @@ func handleConditionResult(serviceTable *termtables.Table, conditions []client.N
formatReady = "\033[0;32;32m true \033[0m"
}
}
serviceTable.AddRow(string(v.Type), formatReady, handleMessage(string(v.Status), v.Message))
serviceTable.AddRow(string(v.Type), formatReady,
v.LastHeartbeatTime.Format(time.RFC3339)[:19],
v.LastTransitionTime.Format(time.RFC3339)[:19],
handleMessage(string(v.Status), v.Message),
)
}
}
@ -181,7 +190,10 @@ func extractReady(serviceTable *termtables.Table, conditions []client.NodeCondit
} else {
formatReady = "\033[0;32;32m true \033[0m"
}
serviceTable.AddRow("\033[0;33;33m "+string(v.Type)+" \033[0m", formatReady, handleMessage(string(v.Status), v.Message))
serviceTable.AddRow("\033[0;33;33m "+string(v.Type)+" \033[0m", formatReady,
v.LastHeartbeatTime.Format(time.RFC3339)[:19],
v.LastTransitionTime.Format(time.RFC3339)[:19],
handleMessage(string(v.Status), v.Message))
}
}
}
@ -249,7 +261,7 @@ func NewCmdNode() cli.Command {
fmt.Println(labeltable)
fmt.Printf("-------------------Service health-----------------------\n")
serviceTable := termtables.CreateTable()
serviceTable.AddHeaders("Condition", "Result", "Message")
serviceTable.AddHeaders("Condition", "Health", "LastUpdateTime", "LastChangeTime", "Message")
extractReady(serviceTable, v.NodeStatus.Conditions, "Ready")
handleConditionResult(serviceTable, v.NodeStatus.Conditions)
fmt.Println(serviceTable.Render())
@ -445,6 +457,10 @@ func NewCmdNode() cli.Command {
}
k := c.String("key")
v := c.String("val")
if k == "" || v == "" {
logrus.Errorf("label key or value can not be empty")
return nil
}
err := clients.RegionClient.Nodes().Label(hostID).Add(k, v)
handleErr(err)
return nil
@ -522,7 +538,7 @@ func NewCmdNode() cli.Command {
conditions, err := clients.RegionClient.Nodes().Condition(hostID).List()
handleErr(err)
serviceTable := termtables.CreateTable()
serviceTable.AddHeaders("Condition", "Result", "Message")
serviceTable.AddHeaders("Condition", "Health", "LastUpdateTime", "LastChangeTime", "Message")
handleConditionResult(serviceTable, conditions)
fmt.Println(serviceTable.Render())
return nil

View File

@ -359,8 +359,8 @@ func showServiceDeployInfo(c *cli.Context) error {
table := uitable.New()
table.Wrap = true // wrap columns
tenantID := service.TenantId
serviceID := service.ServiceId
tenantID := service.TenantID
serviceID := service.ServiceID
table.AddRow("Namespace:", tenantID)
table.AddRow("ServiceID:", serviceID)
if deployInfo.Deployment != "" {

View File

@ -136,7 +136,7 @@ func (e *Node) Modify(event *watch.Event) {
//Delete delete
func (e *Node) Delete(event *watch.Event) {
for i, end := range e.endpoints {
url := gjson.Get(event.GetValueString(), "internal_ip").String() + ":6100"
url := gjson.Get(event.GetPreValueString(), "internal_ip").String() + ":6100"
if end.URL == url {
e.endpoints = append(e.endpoints[:i], e.endpoints[i+1:]...)
e.UpdateEndpoints(e.endpoints...)

38
monitor/custom/custom.go Normal file
View File

@ -0,0 +1,38 @@
package custom
import (
"time"
"github.com/goodrain/rainbond/monitor/prometheus"
"github.com/prometheus/common/model"
)
// Metrics describes a custom static scrape job: its name, the static target
// addresses (Metrics), the scrape interval/timeout and the HTTP metrics path.
type Metrics struct {
	Name     string
	Metrics  []string
	Interval time.Duration
	Timeout  time.Duration
	Path     string
}

// AddMetrics registers the given custom metrics as a static scrape job in the
// prometheus configuration managed by p.
func AddMetrics(p *prometheus.Manager, metrics Metrics) {
	group := &prometheus.Group{
		Targets: metrics.Metrics,
		Labels: map[model.LabelName]model.LabelValue{
			"component":    model.LabelValue(metrics.Name),
			"service_name": model.LabelValue(metrics.Name),
		},
	}
	scrape := &prometheus.ScrapeConfig{
		JobName:        metrics.Name,
		ScrapeInterval: model.Duration(metrics.Interval),
		ScrapeTimeout:  model.Duration(metrics.Timeout),
		MetricsPath:    metrics.Path,
		ServiceDiscoveryConfig: prometheus.ServiceDiscoveryConfig{
			StaticConfigs: []*prometheus.Group{group},
		},
	}
	p.UpdateScrape(scrape)
}

View File

@ -102,7 +102,7 @@ func (d *Monitor) discoverNodes(node *callback.Node, app *callback.App, done <-c
case watch.Deleted:
node.Delete(&event)
isSlave := gjson.Get(event.GetValueString(), "labels.rainbond_node_rule_compute").String()
isSlave := gjson.Get(event.GetPreValueString(), "labels.rainbond_node_rule_compute").String()
if isSlave == "true" {
app.Delete(&event)
}
@ -148,7 +148,7 @@ func (d *Monitor) discoverCadvisor(c *callback.Cadvisor, done <-chan struct{}) {
c.Modify(&event)
}
case watch.Deleted:
isSlave := gjson.Get(event.GetValueString(), "labels.rainbond_node_rule_compute").String()
isSlave := gjson.Get(event.GetPreValueString(), "labels.rainbond_node_rule_compute").String()
if isSlave == "true" {
c.Delete(&event)
}
@ -194,6 +194,7 @@ func (d *Monitor) discoverEtcd(e *callback.Etcd, done <-chan struct{}) {
}
}
// Stop stop monitor
func (d *Monitor) Stop() {
logrus.Info("Stopping all child process for monitor")
d.cancel()
@ -202,6 +203,7 @@ func (d *Monitor) Stop() {
d.client.Close()
}
// NewMonitor new monitor
func NewMonitor(opt *option.Config, p *prometheus.Manager) *Monitor {
ctx, cancel := context.WithCancel(context.Background())
defaultTimeout := time.Second * 3

View File

@ -38,11 +38,15 @@ import (
)
const (
// STARTING starting
STARTING = iota
// STARTED started
STARTED
//STOPPED stoped
STOPPED
)
// Manager manage struct
type Manager struct {
cancel context.CancelFunc
ctx context.Context
@ -56,6 +60,7 @@ type Manager struct {
a *AlertingRulesManager
}
// NewManager new manager
func NewManager(config *option.Config, a *AlertingRulesManager) *Manager {
client := &http.Client{
Timeout: time.Second * 3,
@ -103,6 +108,7 @@ func NewManager(config *option.Config, a *AlertingRulesManager) *Manager {
return m
}
// StartDaemon start prometheus daemon
func (p *Manager) StartDaemon(errchan chan error) {
logrus.Info("Starting prometheus.")
@ -147,6 +153,7 @@ func (p *Manager) StartDaemon(errchan chan error) {
}()
}
// StopDaemon stop daemon
func (p *Manager) StopDaemon() {
if p.Status != STOPPED {
logrus.Info("Stopping prometheus daemon ...")
@ -156,6 +163,7 @@ func (p *Manager) StopDaemon() {
}
}
// RestartDaemon restart daemon
func (p *Manager) RestartDaemon() error {
if p.Status == STARTED {
logrus.Debug("Restart daemon for prometheus.")
@ -167,6 +175,7 @@ func (p *Manager) RestartDaemon() error {
return nil
}
//LoadConfig load config
func (p *Manager) LoadConfig() error {
logrus.Info("Load prometheus config file.")
content, err := ioutil.ReadFile(p.Opt.ConfigFile)
@ -185,6 +194,7 @@ func (p *Manager) LoadConfig() error {
return nil
}
// SaveConfig save config
func (p *Manager) SaveConfig() error {
logrus.Debug("Save prometheus config file.")
data, err := yaml.Marshal(p.Config)
@ -202,6 +212,7 @@ func (p *Manager) SaveConfig() error {
return nil
}
// UpdateScrape update scrape
func (p *Manager) UpdateScrape(scrape *ScrapeConfig) {
logrus.Debugf("update scrape: %+v", scrape)
p.l.Lock()

View File

@ -25,6 +25,7 @@ import (
"strings"
"github.com/goodrain/rainbond/util/disk"
v1 "k8s.io/api/core/v1"
api "github.com/goodrain/rainbond/util/http"
@ -310,31 +311,55 @@ func CapRes(w http.ResponseWriter, r *http.Request) {
//ClusterInfo ClusterInfo
func ClusterInfo(w http.ResponseWriter, r *http.Request) {
usedNodeList := make([]string, 0, 10)
nodes, err := kubecli.GetNodes()
if err != nil {
api.ReturnError(r, w, 500, err.Error())
return
}
var capCPU int64
var capMem int64
type tmpNode struct {
Name string
Health bool
}
var healthCapCPU int64
var healthCapMem int64
var unhealthCapCPU int64
var unhealthCapMem int64
usedNodeList := make([]tmpNode, 0, 10)
for _, v := range nodes {
nodeHealth := false
for _, con := range v.Status.Conditions {
if con.Type == v1.NodeReady {
nodeHealth = con.Status == v1.ConditionTrue
break
}
}
if nodeHealth {
healthCapCPU += v.Status.Allocatable.Cpu().Value()
healthCapMem += v.Status.Allocatable.Memory().Value()
} else {
unhealthCapCPU += v.Status.Allocatable.Cpu().Value()
unhealthCapMem += v.Status.Allocatable.Memory().Value()
}
if v.Spec.Unschedulable == false {
capCPU += v.Status.Capacity.Cpu().Value()
capMem += v.Status.Capacity.Memory().Value()
usedNodeList = append(usedNodeList, v.Name)
usedNodeList = append(usedNodeList, tmpNode{Name: v.Name, Health: nodeHealth})
}
}
var cpuR int64
var memR int64
var healthcpuR int64
var healthmemR int64
var unhealthCPUR int64
var unhealthMemR int64
for _, node := range usedNodeList {
pods, _ := kubecli.GetPodsByNodes(node)
pods, _ := kubecli.GetPodsByNodes(node.Name)
for _, pod := range pods {
for _, c := range pod.Spec.Containers {
rc := c.Resources.Requests.Cpu().MilliValue()
rm := c.Resources.Requests.Memory().Value()
cpuR += rc
memR += rm
if node.Health {
healthcpuR += c.Resources.Requests.Cpu().MilliValue()
healthmemR += c.Resources.Requests.Memory().Value()
} else {
unhealthCPUR += c.Resources.Requests.Cpu().MilliValue()
unhealthMemR += c.Resources.Requests.Memory().Value()
}
}
}
}
@ -344,20 +369,27 @@ func ClusterInfo(w http.ResponseWriter, r *http.Request) {
} else {
diskstauts = disk.DiskUsage(`z:\\`)
}
podMemRequestMB := memR / 1024 / 1024
result := &model.ClusterResource{
CapCPU: int(capCPU),
CapMem: int(capMem) / 1024 / 1024,
ReqCPU: float32(cpuR) / 1000,
ReqMem: int(podMemRequestMB),
ComputeNode: len(nodes),
CapDisk: diskstauts.All,
ReqDisk: diskstauts.Used,
CapCPU: int(healthCapCPU + unhealthCapCPU),
CapMem: int(healthCapMem+unhealthCapMem) / 1024 / 1024,
HealthCapCPU: int(healthCapCPU),
HealthCapMem: int(healthCapMem) / 1024 / 1024,
UnhealthCapCPU: int(unhealthCapCPU),
UnhealthCapMem: int(unhealthCapMem) / 1024 / 1024,
ReqCPU: float32(healthcpuR+unhealthCPUR) / 1000,
ReqMem: int(healthmemR+unhealthMemR) / 1024 / 1024,
HealthReqCPU: float32(healthcpuR) / 1000,
HealthReqMem: int(healthmemR) / 1024 / 1024,
UnhealthReqCPU: float32(unhealthCPUR) / 1000,
UnhealthReqMem: int(unhealthMemR) / 1024 / 1024,
ComputeNode: len(nodes),
CapDisk: diskstauts.All,
ReqDisk: diskstauts.Used,
}
allnodes, _ := nodeService.GetAllNode()
result.AllNode = len(allnodes)
for _, n := range allnodes {
if n.Status != "running" {
if n.Status != "running" || !n.NodeStatus.NodeHealth { //node unhealth status
result.NotReadyNode++
}
}

View File

@ -29,7 +29,7 @@ import (
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/goodrain/rainbond/node/utils"
"github.com/pquerna/ffjson/ffjson"
"k8s.io/api/core/v1" //"github.com/Sirupsen/logrus"
v1 "k8s.io/api/core/v1" //"github.com/Sirupsen/logrus"
)
//Resource 资源
@ -206,16 +206,24 @@ func DoRequest(baseAPI, query, queryType, method string, body []byte) ([]byte, i
//ClusterResource 资源
type ClusterResource struct {
AllNode int `json:"all_node"`
NotReadyNode int `json:"notready_node"`
ComputeNode int `json:"compute_node"`
Tenant int `json:"tenant"`
CapCPU int `json:"cap_cpu"`
CapMem int `json:"cap_mem"`
ReqCPU float32 `json:"req_cpu"`
ReqMem int `json:"req_mem"`
CapDisk uint64 `json:"cap_disk"`
ReqDisk uint64 `json:"req_disk"`
AllNode int `json:"all_node"`
NotReadyNode int `json:"notready_node"`
ComputeNode int `json:"compute_node"`
Tenant int `json:"tenant"`
CapCPU int `json:"cap_cpu"` //可分配CPU总额
CapMem int `json:"cap_mem"` //可分配Mem总额
HealthCapCPU int `json:"health_cap_cpu"` //健康可分配CPU
HealthCapMem int `json:"health_cap_mem"` //健康可分配Mem
UnhealthCapCPU int `json:"unhealth_cap_cpu"` //不健康可分配CPU
UnhealthCapMem int `json:"unhealth_cap_mem"` //不健康可分配Mem
ReqCPU float32 `json:"req_cpu"` //已使用CPU总额
ReqMem int `json:"req_mem"` //已使用Mem总额
HealthReqCPU float32 `json:"health_req_cpu"` //健康已使用CPU
HealthReqMem int `json:"health_req_mem"` //健康已使用Mem
UnhealthReqCPU float32 `json:"unhealth_req_cpu"` //不健康已使用CPU
UnhealthReqMem int `json:"unhealth_req_mem"` //不健康已使用Mem
CapDisk uint64 `json:"cap_disk"`
ReqDisk uint64 `json:"req_disk"`
}
//NodeResource 资源

View File

@ -30,7 +30,7 @@ import (
"github.com/goodrain/rainbond/node/nodem/client"
"github.com/Sirupsen/logrus"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"

View File

@ -20,6 +20,8 @@ package node
import (
"context"
"fmt"
"net/http"
"sync"
"time"
@ -118,27 +120,14 @@ func (n *Cluster) handleNodeStatus(v *client.HostNode) {
return
}
}
if time.Since(v.NodeStatus.NodeUpdateTime) > time.Minute*1 {
if time.Since(v.NodeStatus.NodeUpdateTime) > time.Minute*1 && getNodeHealth(v) {
v.Status = client.Unknown
v.NodeStatus.Status = client.Unknown
r := client.NodeCondition{
Type: client.NodeUp,
Status: client.ConditionFalse,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Message: "Node lost connection, state unknown",
}
v.UpdataCondition(r)
v.GetAndUpdateCondition(client.NodeUp, client.ConditionFalse, "", "Node lost connection, state unknown")
//node lost connection, advice offline action
v.NodeStatus.AdviceAction = append(v.NodeStatus.AdviceAction, "offline")
} else {
r := client.NodeCondition{
Type: client.NodeUp,
Status: client.ConditionTrue,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Message: "Node lost connection, state unknown",
}
v.UpdataCondition(r)
v.GetAndUpdateCondition(client.NodeUp, client.ConditionTrue, "", "")
v.NodeStatus.CurrentScheduleStatus = !v.Unschedulable
if v.Role.HasRule("compute") {
k8sNode, err := n.kubecli.GetNode(v.ID)
@ -148,12 +137,9 @@ func (n *Cluster) handleNodeStatus(v *client.HostNode) {
// Update k8s node status to node status
if k8sNode != nil {
v.UpdataK8sCondition(k8sNode.Status.Conditions)
if v.AvailableCPU == 0 {
v.AvailableCPU = k8sNode.Status.Capacity.Cpu().Value()
}
if v.AvailableMemory == 0 {
v.AvailableMemory = k8sNode.Status.Capacity.Memory().Value()
}
// 添加capacity属性对应相关属性
v.AvailableCPU = k8sNode.Status.Allocatable.Cpu().Value()
v.AvailableMemory = k8sNode.Status.Allocatable.Memory().Value()
v.NodeStatus.KubeNode = k8sNode
v.NodeStatus.KubeUpdateTime = time.Now()
v.NodeStatus.CurrentScheduleStatus = !k8sNode.Spec.Unschedulable
@ -161,25 +147,28 @@ func (n *Cluster) handleNodeStatus(v *client.HostNode) {
}
}
if (v.Role.HasRule("manage") || v.Role.HasRule("gateway")) && !v.Role.HasRule("compute") { //manage install_success == runnint
if v.AvailableCPU == 0 {
v.AvailableCPU = v.NodeStatus.NodeInfo.NumCPU
}
if v.AvailableMemory == 0 {
v.AvailableMemory = int64(v.NodeStatus.NodeInfo.MemorySize)
}
v.AvailableCPU = v.NodeStatus.NodeInfo.NumCPU
v.AvailableMemory = int64(v.NodeStatus.NodeInfo.MemorySize)
}
//handle status
v.Status = v.NodeStatus.Status
if v.Role.HasRule("compute") && v.NodeStatus.KubeNode == nil {
v.Status = "offline"
}
for _, con := range v.NodeStatus.Conditions {
for i, con := range v.NodeStatus.Conditions {
if con.Type == client.NodeReady {
v.NodeStatus.NodeHealth = con.Status == client.ConditionTrue
break
}
if time.Since(con.LastHeartbeatTime) > time.Minute*1 {
// do not update time
v.NodeStatus.Conditions[i].Reason = "Condition not updated in more than 1 minute"
v.NodeStatus.Conditions[i].Message = "Condition not updated in more than 1 minute"
v.NodeStatus.Conditions[i].Status = client.ConditionUnknown
}
}
}
//node ready condition update
v.UpdateReadyStatus()
if v.NodeStatus.AdviceAction != nil {
for _, action := range v.NodeStatus.AdviceAction {
if action == "unscheduler" {
@ -299,3 +288,24 @@ func checkLabels(node *client.HostNode, labels map[string]string) bool {
}
return true
}
func getNodeHealth(node *client.HostNode) bool {
healthURL := fmt.Sprintf("http://%s:6100/v2/ping", node.InternalIP)
for i := 0; i < 3; i++ {
req, err := http.NewRequest("GET", healthURL, nil)
if err != nil {
logrus.Errorf("new node health check request failure %s", err.Error())
continue
}
client := http.DefaultClient
client.Timeout = time.Second * 2
res, err := client.Do(req)
if err != nil {
continue
}
if res != nil && res.StatusCode == 200 {
return true
}
}
return false
}

View File

@ -22,9 +22,13 @@ import (
"context"
"encoding/json"
"fmt"
"net/url"
"path"
"strings"
"time"
"github.com/goodrain/rainbond/util"
"github.com/Sirupsen/logrus"
"github.com/coreos/etcd/clientv3"
"github.com/goodrain/rainbond/cmd"
@ -33,9 +37,12 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
)
// RainbondEndpointPrefix is the prefix of the key of the rainbond endpoints in etcd
const RainbondEndpointPrefix = "/rainbond/endpoint"
//ClusterClient ClusterClient
type ClusterClient interface {
UpdateStatus(*HostNode) error
UpdateStatus(*HostNode, []NodeConditionType) error
DownNode(*HostNode) error
GetMasters() ([]*HostNode, error)
GetNode(nodeID string) (*HostNode, error)
@ -43,7 +50,7 @@ type ClusterClient interface {
GetDataCenterConfig() (*config.DataCenterConfig, error)
GetOptions() *option.Conf
GetEndpoints(key string) []string
SetEndpoints(key string, value []string)
SetEndpoints(serviceName, hostIP string, value []string)
DelEndpoints(key string)
}
@ -59,7 +66,7 @@ type etcdClusterClient struct {
onlineLes clientv3.LeaseID
}
func (e *etcdClusterClient) UpdateStatus(n *HostNode) error {
func (e *etcdClusterClient) UpdateStatus(n *HostNode, deleteConditions []NodeConditionType) error {
existNode, err := e.GetNode(n.ID)
if err != nil {
return fmt.Errorf("get node %s failure where update node %s", n.ID, err.Error())
@ -85,7 +92,12 @@ func (e *etcdClusterClient) UpdateStatus(n *HostNode) error {
}
}
existNode.Labels = newLabels
//update condition and delete old condition
existNode.UpdataCondition(n.NodeStatus.Conditions...)
for _, t := range deleteConditions {
existNode.DeleteCondition(t)
logrus.Infof("remove old condition %s", t)
}
return e.Update(existNode)
}
@ -102,7 +114,7 @@ func (e *etcdClusterClient) GetOptions() *option.Conf {
}
func (e *etcdClusterClient) GetEndpoints(key string) (result []string) {
key = "/rainbond/endpoint/" + key
key = path.Join(RainbondEndpointPrefix, key)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
resp, err := e.conf.EtcdCli.Get(ctx, key, clientv3.WithPrefix())
@ -111,20 +123,46 @@ func (e *etcdClusterClient) GetEndpoints(key string) (result []string) {
return
}
for _, kv := range resp.Kvs {
keyInfo := strings.Split(string(kv.Key), "/")
if !util.CheckIP(keyInfo[len(keyInfo)-1]) {
e.conf.EtcdCli.Delete(ctx, string(kv.Key))
continue
}
var res []string
err = json.Unmarshal(kv.Value, &res)
if err != nil {
logrus.Errorf("Can unmarshal endpoints to array of the key %s", key)
return
}
result = append(result, res...)
//Return data check
for _, v := range res {
endpointURL, err := url.Parse(v)
if err != nil || endpointURL.Host == "" || endpointURL.Path != "" {
continue
}
result = append(result, v)
}
}
logrus.Infof("Get endpoints %s => %v", key, result)
return
}
func (e *etcdClusterClient) SetEndpoints(key string, value []string) {
key = "/rainbond/endpoint/" + key
//SetEndpoints service name and hostip must set
func (e *etcdClusterClient) SetEndpoints(serviceName, hostIP string, value []string) {
if serviceName == "" {
return
}
if !util.CheckIP(hostIP) {
return
}
for _, v := range value {
endpointURL, err := url.Parse(v)
if err != nil || endpointURL.Host == "" || endpointURL.Path != "" {
logrus.Warningf("%s service host %s endpoint value %s invalid", serviceName, hostIP, v)
continue
}
}
key := fmt.Sprintf("%s/%s/%s", RainbondEndpointPrefix, serviceName, hostIP)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
jsonStr, err := json.Marshal(value)
@ -139,7 +177,7 @@ func (e *etcdClusterClient) SetEndpoints(key string, value []string) {
}
func (e *etcdClusterClient) DelEndpoints(key string) {
key = "/rainbond/endpoint/" + key
key = path.Join(RainbondEndpointPrefix, key)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
defer cancel()
_, err := e.conf.EtcdCli.Delete(ctx, key)

View File

@ -21,12 +21,14 @@ package client
import (
"context"
"encoding/json"
"testing"
"time"
"github.com/Sirupsen/logrus"
"github.com/coreos/etcd/clientv3"
"github.com/goodrain/rainbond/cmd/node/option"
"github.com/goodrain/rainbond/node/core/store"
"github.com/goodrain/rainbond/util"
"testing"
"time"
)
func TestEtcdClusterClient_GetEndpoints(t *testing.T) {
@ -104,3 +106,51 @@ func TestEtcdClusterClient_GetEndpoints(t *testing.T) {
}
}
}
func TestSetEndpoints(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{"127.0.0.1:2379"},
DialTimeout: time.Duration(5) * time.Second,
})
if err != nil {
t.Fatal(err)
}
c := NewClusterClient(&option.Conf{EtcdCli: cli})
c.SetEndpoints("etcd", "DSASD", []string{"http://:8080"})
c.SetEndpoints("etcd", "192.168.1.1", []string{"http://:8080"})
c.SetEndpoints("etcd", "192.168.1.1", []string{"http://192.168.1.1:8080"})
}
func TestGetEndpoints(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{"127.0.0.1:2379"},
DialTimeout: time.Duration(5) * time.Second,
})
if err != nil {
t.Fatal(err)
}
c := NewClusterClient(&option.Conf{EtcdCli: cli})
t.Log(c.GetEndpoints("/etcd/"))
}
func TestEtcdClusterClient_ListEndpointKeys(t *testing.T) {
cfg := &option.Conf{
Etcd: clientv3.Config{
Endpoints: []string{"192.168.3.3:2379"},
DialTimeout: 5 * time.Second,
},
}
if err := store.NewClient(cfg); err != nil {
t.Fatalf("error create etcd client: %v", err)
}
hostNode := HostNode{
InternalIP: "192.168.2.76",
}
keys, err := hostNode.listEndpointKeys()
if err != nil {
t.Errorf("unexperted error: %v", err)
}
t.Logf("keys: %#v", keys)
}

View File

@ -30,7 +30,7 @@ import (
"github.com/goodrain/rainbond/node/core/store"
"github.com/goodrain/rainbond/util"
"github.com/pquerna/ffjson/ffjson"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
)
//LabelOS node label about os
@ -306,34 +306,14 @@ func (n *HostNode) UpdateReadyStatus() {
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node health is false", con.Type)
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, n.InternalIP)
status = ConditionFalse
Reason = con.Reason
Message = con.Message
break
}
}
for i, con := range n.NodeStatus.Conditions {
if con.Type.Compare(NodeReady) {
n.NodeStatus.Conditions[i].Reason = Reason
n.NodeStatus.Conditions[i].Message = Message
n.NodeStatus.Conditions[i].LastHeartbeatTime = time.Now()
if con.Status != status {
n.NodeStatus.Conditions[i].LastTransitionTime = time.Now()
n.NodeStatus.Conditions[i].Status = status
}
return
}
}
ready := NodeCondition{
Type: NodeReady,
Status: status,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Reason: Reason,
Message: Message,
}
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions, ready)
n.GetAndUpdateCondition(NodeReady, status, Reason, Message)
}
//GetCondition get condition
@ -346,6 +326,31 @@ func (n *HostNode) GetCondition(ctype NodeConditionType) *NodeCondition {
return nil
}
// GetAndUpdateCondition get old condition and update it, if old condition is nil and then create it
func (n *HostNode) GetAndUpdateCondition(condType NodeConditionType, status ConditionStatus, reason, message string) {
oldCond := n.GetCondition(condType)
now := time.Now()
var lastTransitionTime time.Time
if oldCond == nil {
lastTransitionTime = now
} else {
if oldCond.Status != status {
lastTransitionTime = now
} else {
lastTransitionTime = oldCond.LastTransitionTime
}
}
cond := NodeCondition{
Type: condType,
Status: status,
LastHeartbeatTime: now,
LastTransitionTime: lastTransitionTime,
Reason: reason,
Message: message,
}
n.UpdataCondition(cond)
}
//UpdataCondition 更新状态
func (n *HostNode) UpdataCondition(conditions ...NodeCondition) {
for _, newcon := range conditions {
@ -365,7 +370,6 @@ func (n *HostNode) UpdataCondition(conditions ...NodeCondition) {
if !update {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions, newcon)
}
n.UpdateReadyStatus()
}
}
@ -441,6 +445,18 @@ const (
PIDPressure NodeConditionType = "PIDPressure"
)
var masterCondition = []NodeConditionType{NodeReady, KubeNodeReady, NodeUp, InstallNotReady, NodeInit, OutOfDisk, MemoryPressure, DiskPressure, PIDPressure}
//IsMasterCondition Whether it is a preset condition of the system
func IsMasterCondition(con NodeConditionType) bool {
for _, c := range masterCondition {
if c.Compare(con) {
return true
}
}
return false
}
//Compare 比较
func (nt NodeConditionType) Compare(ent NodeConditionType) bool {
return string(nt) == string(ent)
@ -499,22 +515,30 @@ func (n *HostNode) DeleteNode() (*client.DeleteResponse, error) {
// DelEndpoints -
func (n *HostNode) DelEndpoints() {
keys := n.listEndpointKeys()
keys, err := n.listEndpointKeys()
if err != nil {
logrus.Warningf("error deleting endpoints: %v", err)
return
}
for _, key := range keys {
key = key + n.InternalIP
res, err := store.DefalutClient.Delete(key)
_, err := store.DefalutClient.Delete(key)
if err != nil {
logrus.Warnf("key: %s; error delete endpoints: %v", key, err)
}
fmt.Printf("key: %s; response of deleting endpoints: %+v\n", key, res)
}
}
func (n *HostNode) listEndpointKeys() []string {
// TODO: need improvement, not hard code
return []string{
"/rainbond/endpoint/APISERVER_ENDPOINTS/",
"/rainbond/endpoint/HUB_ENDPOINTS/",
"/rainbond/endpoint/REPO_ENDPOINTS/",
func (n *HostNode) listEndpointKeys() ([]string, error) {
resp, err := store.DefalutClient.Get(RainbondEndpointPrefix, client.WithPrefix())
if err != nil {
return nil, fmt.Errorf("prefix: %s; error list rainbond endpoint keys by prefix: %v", RainbondEndpointPrefix, err)
}
var res []string
for _, kv := range resp.Kvs {
key := string(kv.Key)
if strings.Contains(key, n.InternalIP) {
res = append(res, key)
}
}
return res, nil
}

View File

@ -28,7 +28,7 @@ import (
type Manager interface {
Start(*client.HostNode) error
Stop() error
GetAllService() (*[]*service.Service, error)
GetAllService() ([]*service.Service, error)
Online() error
Offline() error
ReLoadServices() error

View File

@ -52,21 +52,21 @@ type ManagerService struct {
ctr Controller
cluster client.ClusterClient
healthyManager healthy.Manager
services *[]*service.Service
allservice *[]*service.Service
services []*service.Service
allservice []*service.Service
etcdcli *clientv3.Client
autoStatusController map[string]statusController
lock sync.Mutex
}
//GetAllService get all service
func (m *ManagerService) GetAllService() (*[]*service.Service, error) {
func (m *ManagerService) GetAllService() ([]*service.Service, error) {
return m.allservice, nil
}
//GetService get service
func (m *ManagerService) GetService(serviceName string) *service.Service {
for _, s := range *m.allservice {
for _, s := range m.allservice {
if s.Name == serviceName {
return s
}
@ -80,20 +80,20 @@ func (m *ManagerService) Start(node *client.HostNode) error {
m.loadServiceConfig()
m.node = node
if m.conf.EnableInitStart {
return m.ctr.InitStart(*m.services)
return m.ctr.InitStart(m.services)
}
return nil
}
func (m *ManagerService) loadServiceConfig() {
*m.allservice = service.LoadServicesFromLocal(m.conf.ServiceListFile)
m.allservice = service.LoadServicesFromLocal(m.conf.ServiceListFile)
var controllerServices []*service.Service
for _, s := range *m.allservice {
for _, s := range m.allservice {
if !s.OnlyHealthCheck && !s.Disable {
controllerServices = append(controllerServices, s)
}
}
*m.services = controllerServices
m.services = controllerServices
}
//Stop stop manager
@ -112,7 +112,7 @@ func (m *ManagerService) Online() error {
go m.StartServices()
m.SyncServiceStatusController()
// registry local services endpoint into cluster manager
for _, s := range *m.services {
for _, s := range m.services {
m.UpOneServiceEndpoint(s)
}
return nil
@ -124,14 +124,14 @@ func (m *ManagerService) SetEndpoints(hostIP string) {
logrus.Warningf("ignore wrong hostIP: %s", hostIP)
return
}
for _, s := range *m.services {
for _, s := range m.services {
m.UpOneServiceEndpoint(s)
}
}
//StartServices start services
func (m *ManagerService) StartServices() {
for _, service := range *m.services {
for _, service := range m.services {
if !service.Disable {
logrus.Infof("Begin start service %s", service.Name)
if err := m.ctr.WriteConfig(service); err != nil {
@ -150,11 +150,11 @@ func (m *ManagerService) StartServices() {
func (m *ManagerService) Offline() error {
logrus.Info("Doing node offline by node controller manager")
services, _ := m.GetAllService()
for _, s := range *services {
for _, s := range services {
m.DownOneServiceEndpoint(s)
}
m.StopSyncService()
if err := m.ctr.StopList(*m.services); err != nil {
if err := m.ctr.StopList(m.services); err != nil {
return err
}
return nil
@ -171,7 +171,7 @@ func (m *ManagerService) DownOneServiceEndpoint(s *service.Service) {
if exist := isExistEndpoint(oldEndpoints, endpoint); exist {
endpoints := rmEndpointFrom(oldEndpoints, endpoint)
if len(endpoints) > 0 {
m.cluster.SetEndpoints(key, endpoints)
m.cluster.SetEndpoints(end.Name, m.cluster.GetOptions().HostIP, endpoints)
continue
}
m.cluster.DelEndpoints(key)
@ -190,16 +190,13 @@ func (m *ManagerService) UpOneServiceEndpoint(s *service.Service) {
if end.Name == "" || strings.Replace(end.Port, " ", "", -1) == "" {
continue
}
key := end.Name + "/" + hostIP
logrus.Debug("Discovery endpoints: ", key)
endpoint := toEndpoint(end, hostIP)
m.cluster.SetEndpoints(key, []string{endpoint})
m.cluster.SetEndpoints(end.Name, hostIP, []string{endpoint})
}
}
//SyncServiceStatusController synchronize all service status to as we expect
func (m *ManagerService) SyncServiceStatusController() {
logrus.Debug("run SyncServiceStatusController")
m.lock.Lock()
defer m.lock.Unlock()
if m.autoStatusController != nil && len(m.autoStatusController) > 0 {
@ -207,8 +204,8 @@ func (m *ManagerService) SyncServiceStatusController() {
v.Stop()
}
}
m.autoStatusController = make(map[string]statusController, len(*m.services))
for _, s := range *m.services {
m.autoStatusController = make(map[string]statusController, len(m.services))
for _, s := range m.services {
if s.ServiceHealth == nil {
continue
}
@ -284,16 +281,16 @@ func (s *statusController) Run() {
if s.healthHandle != nil {
s.healthHandle(event, s.watcher)
}
logrus.Debugf("is [%s] of service %s.", event.Status, event.Name)
logrus.Debugf("service %s status is [%s]", event.Name, event.Status)
case service.Stat_unhealthy:
if s.service.ServiceHealth != nil {
if event.ErrorNumber > s.service.ServiceHealth.MaxErrorsNum {
logrus.Infof("is [%s] of service %s %d times and restart it.", event.Status, event.Name, event.ErrorNumber)
logrus.Warningf("service %s status is [%s] more than %d times and restart it.", event.Name, event.Status, s.service.ServiceHealth.MaxErrorsNum)
s.unhealthHandle(event, s.watcher)
}
}
case service.Stat_death:
logrus.Infof("is [%s] of service %s %d times and start it.", event.Status, event.Name, event.ErrorNumber)
logrus.Warningf("service %s status is [%s] will restart it.", event.Name, event.Status)
s.unhealthHandle(event, s.watcher)
}
case <-s.ctx.Done():
@ -353,7 +350,7 @@ func (m *ManagerService) ReLoadServices() error {
controllerServices = append(controllerServices, ne)
}
exists := false
for _, old := range *m.services {
for _, old := range m.services {
if ne.Name == old.Name {
if ne.Disable {
m.ctr.StopService(ne.Name)
@ -383,9 +380,9 @@ func (m *ManagerService) ReLoadServices() error {
}
}
}
*m.allservice = services
*m.services = controllerServices
m.healthyManager.AddServicesAndUpdate(m.services)
m.allservice = services
m.services = controllerServices
m.healthyManager.AddServicesAndUpdate(m.allservice)
m.SyncServiceStatusController()
logrus.Infof("load service config success, start or stop %d service and total %d service", restartCount, len(services))
return nil
@ -393,7 +390,7 @@ func (m *ManagerService) ReLoadServices() error {
//StartService start a service
func (m *ManagerService) StartService(serviceName string) error {
for _, service := range *m.services {
for _, service := range m.services {
if service.Name == serviceName {
if !service.Disable {
return fmt.Errorf("service %s is running", serviceName)
@ -406,12 +403,12 @@ func (m *ManagerService) StartService(serviceName string) error {
//StopService start a service
func (m *ManagerService) StopService(serviceName string) error {
for i, service := range *m.services {
for i, service := range m.services {
if service.Name == serviceName {
if service.Disable {
return fmt.Errorf("service %s is stoped", serviceName)
}
(*m.services)[i].Disable = true
(m.services)[i].Disable = true
m.lock.Lock()
defer m.lock.Unlock()
if controller, ok := m.autoStatusController[serviceName]; ok {
@ -425,7 +422,7 @@ func (m *ManagerService) StopService(serviceName string) error {
//WriteServices write services
func (m *ManagerService) WriteServices() error {
for _, s := range *m.services {
for _, s := range m.services {
if s.OnlyHealthCheck {
continue
}
@ -534,8 +531,6 @@ func NewManagerService(conf *option.Conf, healthyManager healthy.Manager, cluste
cluster: cluster,
healthyManager: healthyManager,
etcdcli: conf.EtcdCli,
services: new([]*service.Service),
allservice: new([]*service.Service),
}
manager.ctr = NewController(conf, manager)
return manager

View File

@ -36,7 +36,7 @@ func OneNodeClusterLoadAssignment(serviceAlias, namespace string, endpoints []*c
for i := range services {
if domain, ok := services[i].Annotations["domain"]; ok && domain != "" {
logrus.Warnf("service[sid: %s] endpoint id domain endpoint[domain: %s], use dns cluster type, do not create eds", services[i].GetUID(), domain)
return
continue
}
service := services[i]
destServiceAlias := GetServiceAliasByService(service)

View File

@ -38,9 +38,9 @@ type Manager interface {
WatchServiceHealthy(serviceName string) Watcher
CloseWatch(serviceName string, id string) error
Start(hostNode *client.HostNode) error
AddServices(*[]*service.Service) error
AddServices([]*service.Service) error
GetServiceHealth() map[string]*service.HealthStatus
AddServicesAndUpdate(*[]*service.Service) error
AddServicesAndUpdate([]*service.Service) error
Stop() error
DisableWatcher(serviceName, watcherID string)
EnableWatcher(serviceName, watcherID string)
@ -63,7 +63,7 @@ type watcher struct {
}
type probeManager struct {
services *[]*service.Service
services []*service.Service
serviceProbe map[string]probe.Probe
status map[string]*service.HealthStatus
ctx context.Context
@ -91,11 +91,11 @@ func CreateManager() Manager {
return m
}
func (p *probeManager) AddServices(inner *[]*service.Service) error {
func (p *probeManager) AddServices(inner []*service.Service) error {
p.services = inner
return nil
}
func (p *probeManager) AddServicesAndUpdate(inner *[]*service.Service) error {
func (p *probeManager) AddServicesAndUpdate(inner []*service.Service) error {
p.services = inner
p.updateServiceProbe()
return nil
@ -109,21 +109,30 @@ func (p *probeManager) Start(hostNode *client.HostNode) error {
}
func (p *probeManager) updateServiceProbe() {
p.lock.Lock()
defer p.lock.Unlock()
//stop all probe
for _, pro := range p.serviceProbe {
pro.Stop()
}
p.serviceProbe = make(map[string]probe.Probe, len(*p.services))
for _, v := range *p.services {
if v.ServiceHealth == nil {
//create new probe
p.serviceProbe = make(map[string]probe.Probe, len(p.services))
for i := range p.services {
service := p.services[i]
if service.ServiceHealth == nil {
continue
}
if v.Disable {
if service.Disable {
continue
}
serviceProbe := probe.CreateProbe(p.ctx, p.hostNode, p.statusChan, v)
serviceProbe, err := probe.CreateProbe(p.ctx, p.hostNode, p.statusChan, service)
if err != nil {
logrus.Warningf("create prose for service %s failure,%s", service.Name, err.Error())
}
if serviceProbe != nil {
p.serviceProbe[v.Name] = serviceProbe
p.serviceProbe[service.Name] = serviceProbe
serviceProbe.Check()
logrus.Infof("create probe for service %s", service.Name)
}
}
}
@ -154,20 +163,24 @@ func (p *probeManager) updateServiceStatus(status *service.HealthStatus) {
p.status[status.Name] = status
}
}
func (p *probeManager) updateWatcher(status *service.HealthStatus) {
p.lock.Lock()
defer p.lock.Unlock()
if watcherMap, ok := p.watches[status.Name]; ok {
for _, watcher := range watcherMap {
if watcher.enable {
watcher.statusChan <- status
}
}
}
}
func (p *probeManager) HandleStatus() {
for {
select {
case status := <-p.statusChan:
p.updateServiceStatus(status)
p.lock.Lock()
if watcherMap, ok := p.watches[status.Name]; ok {
for _, watcher := range watcherMap {
if watcher.enable {
watcher.statusChan <- status
}
}
}
p.lock.Unlock()
p.updateWatcher(status)
case <-p.ctx.Done():
return
}
@ -254,10 +267,10 @@ func (p *probeManager) WatchServiceHealthy(serviceName string) Watcher {
}
func (p *probeManager) GetCurrentServiceHealthy(serviceName string) (*service.HealthStatus, error) {
if len(*p.services) == 0 {
if len(p.services) == 0 {
return nil, errors.New("services list is empty")
}
for _, v := range *p.services {
for _, v := range p.services {
if v.Name == serviceName {
if v.ServiceHealth.Model == "http" {
statusMap := probe.GetHTTPHealth(v.ServiceHealth.Address)
@ -291,5 +304,20 @@ func (p *probeManager) GetCurrentServiceHealthy(serviceName string) (*service.He
return nil, errors.New("the service does not exist")
}
func (p *probeManager) GetServiceHealth() map[string]*service.HealthStatus {
return p.status
p.lock.Lock()
defer p.lock.Unlock()
newstatus := make(map[string]*service.HealthStatus, len(p.services))
for _, s := range p.services {
if status, ok := p.status[s.Name]; ok {
newstatus[s.Name] = status
} else {
//before not have status set service status is health
newstatus[s.Name] = &service.HealthStatus{
Name: s.Name,
Status: service.Stat_healthy,
Info: "not have health probe or not have status",
}
}
}
return newstatus
}

View File

@ -12,7 +12,7 @@ import (
func TestProbeManager_Start(t *testing.T) {
m := CreateManager()
serviceList := make([]*service.Service, 0, 10)
var serviceList []*service.Service
h := &service.Service{
Name: "builder",
@ -44,7 +44,7 @@ func TestProbeManager_Start(t *testing.T) {
serviceList = append(serviceList, h)
serviceList = append(serviceList, h2)
serviceList = append(serviceList, h3)
m.AddServices(&serviceList)
m.AddServices(serviceList)
watcher1 := m.WatchServiceHealthy("webcli")
watcher2 := m.WatchServiceHealthy("worker")
watcher3 := m.WatchServiceHealthy("builder")

View File

@ -20,6 +20,8 @@ package probe
import (
"context"
"fmt"
"strings"
"github.com/goodrain/rainbond/node/nodem/client"
"github.com/goodrain/rainbond/node/nodem/service"
@ -32,10 +34,12 @@ type Probe interface {
}
//CreateProbe create probe
func CreateProbe(ctx context.Context, hostNode *client.HostNode, statusChan chan *service.HealthStatus, v *service.Service) Probe {
func CreateProbe(ctx context.Context, hostNode *client.HostNode, statusChan chan *service.HealthStatus, v *service.Service) (Probe, error) {
ctx, cancel := context.WithCancel(ctx)
if v.ServiceHealth.Model == "http" {
h := &HttpProbe{
model := strings.ToLower(strings.TrimSpace(v.ServiceHealth.Model))
switch model {
case "http":
return &HttpProbe{
Name: v.ServiceHealth.Name,
Address: v.ServiceHealth.Address,
Ctx: ctx,
@ -44,11 +48,9 @@ func CreateProbe(ctx context.Context, hostNode *client.HostNode, statusChan chan
TimeInterval: v.ServiceHealth.TimeInterval,
HostNode: hostNode,
MaxErrorsNum: v.ServiceHealth.MaxErrorsNum,
}
return h
}
if v.ServiceHealth.Model == "tcp" {
t := &TcpProbe{
}, nil
case "tcp":
return &TcpProbe{
Name: v.ServiceHealth.Name,
Address: v.ServiceHealth.Address,
Ctx: ctx,
@ -57,11 +59,9 @@ func CreateProbe(ctx context.Context, hostNode *client.HostNode, statusChan chan
TimeInterval: v.ServiceHealth.TimeInterval,
HostNode: hostNode,
MaxErrorsNum: v.ServiceHealth.MaxErrorsNum,
}
return t
}
if v.ServiceHealth.Model == "cmd" {
s := &ShellProbe{
}, nil
case "cmd":
return &ShellProbe{
Name: v.ServiceHealth.Name,
Address: v.ServiceHealth.Address,
Ctx: ctx,
@ -70,9 +70,9 @@ func CreateProbe(ctx context.Context, hostNode *client.HostNode, statusChan chan
TimeInterval: v.ServiceHealth.TimeInterval,
HostNode: hostNode,
MaxErrorsNum: v.ServiceHealth.MaxErrorsNum,
}
return s
}, nil
default:
cancel()
return nil, fmt.Errorf("service %s probe mode %s not support ", v.Name, model)
}
cancel()
return nil
}

View File

@ -11,6 +11,7 @@ import (
"github.com/goodrain/rainbond/node/nodem/service"
)
//ShellProbe -
type ShellProbe struct {
Name string
Address string
@ -22,12 +23,17 @@ type ShellProbe struct {
MaxErrorsNum int
}
//Check -
func (h *ShellProbe) Check() {
go h.ShellCheck()
}
//Stop -
func (h *ShellProbe) Stop() {
h.Cancel()
}
//ShellCheck -
func (h *ShellProbe) ShellCheck() {
timer := time.NewTimer(time.Second * time.Duration(h.TimeInterval))
defer timer.Stop()
@ -48,10 +54,9 @@ func (h *ShellProbe) ShellCheck() {
}
}
// GetShellHealth get shell health
func GetShellHealth(address string) map[string]string {
cmd := exec.Command("/bin/bash", "-c", address)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr

View File

@ -207,6 +207,9 @@ func (c *ContainerLogManage) loollist() {
case <-ticker.C:
for _, container := range c.listContainer() {
cj, _ := c.getContainer(container.ID)
if cj.ContainerJSONBase == nil || cj.HostConfig == nil || cj.HostConfig.LogConfig.Type == "" {
continue
}
loggerType := cj.HostConfig.LogConfig.Type
if loggerType != "json-file" && loggerType != "syslog" {
continue

View File

@ -23,6 +23,8 @@ import (
"fmt"
"testing"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/cmd/node/option"
)
@ -61,3 +63,20 @@ func TestGetConatainerLogger(t *testing.T) {
fmt.Println(string(line))
}
}
func TestHostConfig(t *testing.T) {
cj := new(types.ContainerJSON)
if cj.ContainerJSONBase == nil || cj.HostConfig == nil || cj.HostConfig.LogConfig.Type == "" {
fmt.Println("jsonBase is nil")
cj.ContainerJSONBase = new(types.ContainerJSONBase)
}
if cj.ContainerJSONBase == nil || cj.HostConfig == nil || cj.HostConfig.LogConfig.Type == "" {
fmt.Println("hostConfig is nil")
cj.HostConfig = &container.HostConfig{}
}
if cj.ContainerJSONBase == nil || cj.HostConfig == nil || cj.HostConfig.LogConfig.Type == "" {
fmt.Println("logconfig is nil won't panic")
return
}
}

View File

@ -176,7 +176,7 @@ func New(ctx logger.Info) (logger.Logger, error) {
config: ctx.Config,
serverAddress: address,
reConnecting: make(chan bool, 1),
cacheQueue: make(chan string, 2000),
cacheQueue: make(chan string, 20000),
intervalSendMicrosecondTime: 1000 * 10,
minIntervalSendMicrosecondTime: 1000,
closedChan: make(chan struct{}),
@ -244,7 +244,6 @@ func (s *StreamLog) send() {
s.sendMsg(msg)
break
case <-tike.C:
//每3秒发送健康消息
s.ping()
}
}
@ -253,7 +252,7 @@ func (s *StreamLog) sendMsg(msg string) {
if !s.writer.IsClosed() {
err := s.writer.Write(msg)
if err != nil {
logrus.Error("send log message to stream server error.", err.Error())
logrus.Debug("send log message to stream server error.", err.Error())
s.cache(msg)
neterr, ok := err.(net.Error)
if ok && neterr.Timeout() {
@ -262,13 +261,11 @@ func (s *StreamLog) sendMsg(msg string) {
s.reConect()
}
} else {
//如果发送正确无错误。加快发送速度
if s.intervalSendMicrosecondTime > s.minIntervalSendMicrosecondTime {
s.intervalSendMicrosecondTime -= 100
}
}
} else {
logrus.Error("the writer is closed.try reconect")
if len(s.reConnecting) < 1 {
s.reConect()
}

View File

@ -1,79 +1,45 @@
package streamlog
import (
"bufio"
"io"
"os"
"sync"
"testing"
"time"
"github.com/goodrain/rainbond/node/nodem/logger"
"fmt"
"github.com/pborman/uuid"
)
func TestStreamLogBeak(t *testing.T) {
log, err := New(logger.Info{
ContainerID: uuid.New(),
ContainerEnv: []string{"TENANT_ID=" + uuid.New(), "SERVICE_ID=" + uuid.New()},
Config: map[string]string{"stream-server": "127.0.0.1:6362"},
})
if err != nil {
t.Fatal(err)
return
}
fi, err := os.Open("./test/log.txt")
if err != nil {
fmt.Printf("Error: %s\n", err)
return
}
defer fi.Close()
defer log.Close()
br := bufio.NewReader(fi)
for {
a, _, c := br.ReadLine()
if c == io.EOF {
break
}
err := log.Log(&logger.Message{
Line: a,
Timestamp: time.Now(),
Source: "stdout",
})
if err != nil {
return
}
}
time.Sleep(10 * time.Second)
}
func TestStreamLog(t *testing.T) {
log, err := New(logger.Info{
ContainerID: uuid.New(),
ContainerEnv: []string{"TENANT_ID=" + uuid.New(), "SERVICE_ID=" + uuid.New()},
Config: map[string]string{"stream-server": "127.0.0.1:6362"},
})
if err != nil {
t.Fatal(err)
return
var wait sync.WaitGroup
for j := 0; j < 1000; j++ {
wait.Add(1)
go func() {
defer wait.Done()
log, err := New(logger.Info{
ContainerID: uuid.New(),
ContainerEnv: []string{"TENANT_ID=" + uuid.New(), "SERVICE_ID=" + uuid.New()},
Config: map[string]string{"stream-server": "192.168.2.203:6362"},
})
if err != nil {
t.Fatal(err)
return
}
defer log.Close()
for i := 0; i < 500000; i++ {
err := log.Log(&logger.Message{
Line: []byte("hello word!hello word!hello word!hello word!hello word!hello word!asdasfmaksmfkasmfkamsmakmskamsdaskdaksdmaksmdkamsdkamsdkmaksdmaksdmkamsdkamsdkaksdakdmklamdlkamdsklmalksdmlkamsdlkamdlkamsdlkmalksmdlkadmlkam"),
Timestamp: time.Now(),
Source: "stdout",
})
if err != nil {
t.Fatal(err)
}
time.Sleep(time.Millisecond * 2)
}
}()
}
for i := 0; i < 5000; i++ {
err := log.Log(&logger.Message{
Line: []byte("hello word!hello word!hello word!hello word!hello word!hello word!asdasfmaksmfkasmfkamsmakmskamsdaskdaksdmaksmdkamsdkamsdkmaksdmaksdmkamsdkamsdkaksdakdmklamdlkamdsklmalksdmlkamsdlkamdlkamsdlkmalksmdlkadmlkam"),
Timestamp: time.Now(),
Source: "stdout",
})
if err != nil {
return
}
//time.Sleep(time.Millisecond)
}
//time.Sleep(10 * time.Second)
log.Close()
wait.Wait()
}
func BenchmarkStreamLog(t *testing.B) {

View File

@ -184,10 +184,10 @@ func (n *NodeManager) CheckNodeHealthy() (bool, error) {
if err != nil {
return false, fmt.Errorf("get all services error,%s", err.Error())
}
for _, v := range *services {
for _, v := range services {
result, ok := n.healthy.GetServiceHealthy(v.Name)
if ok {
if result.Status != service.Stat_healthy {
if result.Status != service.Stat_healthy && result.Status != service.Stat_Unknow {
return false, fmt.Errorf(result.Info)
}
} else {
@ -199,72 +199,52 @@ func (n *NodeManager) CheckNodeHealthy() (bool, error) {
func (n *NodeManager) heartbeat() {
util.Exec(n.ctx, func() error {
//TODO:Judge state
allServiceHealth := n.healthy.GetServiceHealth()
allHealth := true
currentNode, err := n.getCurrentNode(n.currentNode.ID)
if n.currentNode == nil {
logrus.Warningf("get current node by id %s error: %v", n.currentNode.ID, err)
return err
}
n.currentNode.NodeStatus.NodeInfo = currentNode.NodeStatus.NodeInfo
for k, v := range allServiceHealth {
if ser := n.controller.GetService(k); ser != nil {
status := client.ConditionTrue
message := ""
reason := ""
if ser.ServiceHealth != nil {
maxNum := ser.ServiceHealth.MaxErrorsNum
if maxNum < 2 {
maxNum = 2
}
if v.Status != service.Stat_healthy && v.ErrorNumber > maxNum {
if v.Status != service.Stat_healthy && v.Status != service.Stat_Unknow && v.ErrorNumber > maxNum {
allHealth = false
n.currentNode.UpdataCondition(
client.NodeCondition{
Type: client.NodeConditionType(ser.Name),
Status: client.ConditionFalse,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Message: v.Info,
Reason: "NotHealth",
})
}
if v.Status == service.Stat_healthy {
old := n.currentNode.GetCondition(client.NodeConditionType(ser.Name))
if old == nil || old.Status == client.ConditionFalse {
n.currentNode.UpdataCondition(
client.NodeCondition{
Type: client.NodeConditionType(ser.Name),
Status: client.ConditionTrue,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
Reason: "Health",
})
}
}
if n.cfg.AutoUnschedulerUnHealthDuration == 0 {
continue
}
if v.ErrorDuration > n.cfg.AutoUnschedulerUnHealthDuration && n.cfg.AutoScheduler {
n.currentNode.NodeStatus.AdviceAction = []string{"unscheduler"}
}
} else {
old := n.currentNode.GetCondition(client.NodeConditionType(ser.Name))
if old == nil {
n.currentNode.UpdataCondition(
client.NodeCondition{
Type: client.NodeConditionType(ser.Name),
Status: client.ConditionTrue,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
})
status = client.ConditionFalse
message = v.Info
reason = "NotHealth"
}
}
n.currentNode.GetAndUpdateCondition(client.NodeConditionType(ser.Name), status, reason, message)
if n.cfg.AutoUnschedulerUnHealthDuration == 0 {
continue
}
if v.ErrorDuration > n.cfg.AutoUnschedulerUnHealthDuration && n.cfg.AutoScheduler {
n.currentNode.NodeStatus.AdviceAction = []string{"unscheduler"}
logrus.Warningf("node unhealth more than %s, will send unscheduler advice action to master", n.cfg.AutoUnschedulerUnHealthDuration.String())
}
} else {
logrus.Errorf("can not find service %s", k)
}
}
//remove old condition
var deleteCondition []client.NodeConditionType
for _, con := range n.currentNode.NodeStatus.Conditions {
if n.controller.GetService(string(con.Type)) == nil && !client.IsMasterCondition(con.Type) {
deleteCondition = append(deleteCondition, con.Type)
}
}
//node ready condition update
n.currentNode.UpdateReadyStatus()
if allHealth && n.cfg.AutoScheduler {
n.currentNode.NodeStatus.AdviceAction = []string{"scheduler"}
}
n.currentNode.NodeStatus.Status = "running"
if err := n.cluster.UpdateStatus(n.currentNode); err != nil {
if err := n.cluster.UpdateStatus(n.currentNode, deleteCondition); err != nil {
logrus.Errorf("update node status error %s", err.Error())
}
if n.currentNode.NodeStatus.Status != "running" {
@ -296,13 +276,14 @@ func (n *NodeManager) init() error {
return fmt.Errorf("find node %s from cluster failure %s", n.currentNode.ID, err.Error())
}
}
if node.NodeStatus.NodeInfo.OperatingSystem == "" {
node.NodeStatus.NodeInfo = info.GetSystemInfo()
}
//update node mode
node.Mode = n.cfg.RunMode
//update node rule
node.Role = strings.Split(n.cfg.NodeRule, ",")
//update system info
if !node.Role.HasRule("compute") {
node.NodeStatus.NodeInfo = info.GetSystemInfo()
}
//set node labels
n.setNodeLabels(node)
*(n.currentNode) = *node
@ -359,12 +340,7 @@ func (n *NodeManager) getCurrentNode(uid string) (*client.HostNode, error) {
node := CreateNode(uid, n.cfg.HostIP)
n.setNodeLabels(&node)
node.NodeStatus.NodeInfo = info.GetSystemInfo()
node.UpdataCondition(client.NodeCondition{
Type: client.NodeInit,
Status: client.ConditionTrue,
LastHeartbeatTime: time.Now(),
LastTransitionTime: time.Now(),
})
node.GetAndUpdateCondition(client.NodeInit, client.ConditionTrue, "", "")
node.Mode = n.cfg.RunMode
node.NodeStatus.Status = "running"
return &node, nil

View File

@ -27,6 +27,7 @@ import (
)
const (
Stat_Unknow string = "unknow" //健康
Stat_healthy string = "healthy" //健康
Stat_unhealthy string = "unhealthy" //出现异常
Stat_death string = "death" //请求不通

View File

@ -530,7 +530,7 @@ func Zip(source, target string) error {
func Unzip(archive, target string) error {
reader, err := zip.OpenDirectReader(archive)
if err != nil {
return err
return fmt.Errorf("error opening archive: %v", err)
}
if err := os.MkdirAll(target, 0755); err != nil {
return err
@ -546,7 +546,7 @@ func Unzip(archive, target string) error {
uid, _ := strconv.Atoi(guid[0])
gid, _ := strconv.Atoi(guid[1])
if err := os.Chown(path, uid, gid); err != nil {
return err
return fmt.Errorf("error changing owner: %v", err)
}
}
}
@ -555,17 +555,17 @@ func Unzip(archive, target string) error {
fileReader, err := file.Open()
if err != nil {
return err
return fmt.Errorf("fileReader; error opening file: %v", err)
}
defer fileReader.Close()
targetFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode())
if err != nil {
return err
return fmt.Errorf("targetFile; error opening file: %v", err)
}
defer targetFile.Close()
if _, err := io.Copy(targetFile, fileReader); err != nil {
return err
return fmt.Errorf("error copy file: %v", err)
}
if file.Comment != "" && strings.Contains(file.Comment, "/") {
guid := strings.Split(file.Comment, "/")

Some files were not shown because too many files have changed in this diff Show More