diff --git a/api/api/api_interface.go b/api/api/api_interface.go
index ddc7bc989..a406d0fe4 100644
--- a/api/api/api_interface.go
+++ b/api/api/api_interface.go
@@ -30,6 +30,17 @@ type ClusterInterface interface {
MavenSettingUpdate(w http.ResponseWriter, r *http.Request)
MavenSettingDelete(w http.ResponseWriter, r *http.Request)
MavenSettingDetail(w http.ResponseWriter, r *http.Request)
+ GetNamespace(w http.ResponseWriter, r *http.Request)
+ GetNamespaceResource(w http.ResponseWriter, r *http.Request)
+ ConvertResource(w http.ResponseWriter, r *http.Request)
+ ResourceImport(w http.ResponseWriter, r *http.Request)
+ AddResource(w http.ResponseWriter, r *http.Request)
+ DeleteResource(w http.ResponseWriter, r *http.Request)
+ UpdateResource(w http.ResponseWriter, r *http.Request)
+ SyncResource(w http.ResponseWriter, r *http.Request)
+ YamlResourceName(w http.ResponseWriter, r *http.Request)
+ YamlResourceDetailed(w http.ResponseWriter, r *http.Request)
+ YamlResourceImport(w http.ResponseWriter, r *http.Request)
}
//TenantInterface interface
@@ -88,6 +99,8 @@ type ServiceInterface interface {
AddServiceMonitors(w http.ResponseWriter, r *http.Request)
DeleteServiceMonitors(w http.ResponseWriter, r *http.Request)
UpdateServiceMonitors(w http.ResponseWriter, r *http.Request)
+ UploadPackage(w http.ResponseWriter, r *http.Request)
+ K8sAttributes(w http.ResponseWriter, r *http.Request)
}
//TenantInterfaceWithV1 funcs for both v2 and v1
@@ -153,6 +166,7 @@ type AppInterface interface {
NewUpload(w http.ResponseWriter, r *http.Request)
ImportID(w http.ResponseWriter, r *http.Request)
ImportApp(w http.ResponseWriter, r *http.Request)
+ UploadID(w http.ResponseWriter, r *http.Request)
}
// ApplicationInterface tenant application interface
@@ -172,7 +186,6 @@ type ApplicationInterface interface {
Install(w http.ResponseWriter, r *http.Request)
ListServices(w http.ResponseWriter, r *http.Request)
ListHelmAppReleases(w http.ResponseWriter, r *http.Request)
-
DeleteConfigGroup(w http.ResponseWriter, r *http.Request)
ListConfigGroups(w http.ResponseWriter, r *http.Request)
SyncComponents(w http.ResponseWriter, r *http.Request)
diff --git a/api/api_routers/version2/v2Routers.go b/api/api_routers/version2/v2Routers.go
index 51f977e84..e1d489525 100644
--- a/api/api_routers/version2/v2Routers.go
+++ b/api/api_routers/version2/v2Routers.go
@@ -94,6 +94,17 @@ func (v2 *V2) clusterRouter() chi.Router {
r.Get("/builder/mavensetting/{name}", controller.GetManager().MavenSettingDetail)
r.Put("/builder/mavensetting/{name}", controller.GetManager().MavenSettingUpdate)
r.Delete("/builder/mavensetting/{name}", controller.GetManager().MavenSettingDelete)
+ r.Get("/namespace", controller.GetManager().GetNamespace)
+ r.Get("/resource", controller.GetManager().GetNamespaceResource)
+ r.Get("/convert-resource", controller.GetManager().ConvertResource)
+ r.Post("/convert-resource", controller.GetManager().ResourceImport)
+ r.Post("/k8s-resource", controller.GetManager().AddResource)
+ r.Delete("/k8s-resource", controller.GetManager().DeleteResource)
+ r.Put("/k8s-resource", controller.GetManager().UpdateResource)
+ r.Post("/sync-k8s-resources", controller.GetManager().SyncResource)
+ r.Get("/yaml_resource_name", controller.GetManager().YamlResourceName)
+ r.Get("/yaml_resource_detailed", controller.GetManager().YamlResourceDetailed)
+ r.Post("/yaml_resource_import", controller.GetManager().YamlResourceImport)
return r
}
@@ -273,6 +284,10 @@ func (v2 *V2) serviceRouter() chi.Router {
r.Put("/label", middleware.WrapEL(controller.GetManager().Label, dbmodel.TargetTypeService, "update-service-label", dbmodel.SYNEVENTTYPE))
r.Delete("/label", middleware.WrapEL(controller.GetManager().Label, dbmodel.TargetTypeService, "delete-service-label", dbmodel.SYNEVENTTYPE))
+ // Component K8s properties are modified
+ r.Post("/k8s-attributes", middleware.WrapEL(controller.GetManager().K8sAttributes, dbmodel.TargetTypeService, "create-component-k8s-attributes", dbmodel.SYNEVENTTYPE))
+ r.Put("/k8s-attributes", middleware.WrapEL(controller.GetManager().K8sAttributes, dbmodel.TargetTypeService, "update-component-k8s-attributes", dbmodel.SYNEVENTTYPE))
+ r.Delete("/k8s-attributes", middleware.WrapEL(controller.GetManager().K8sAttributes, dbmodel.TargetTypeService, "delete-component-k8s-attributes", dbmodel.SYNEVENTTYPE))
//插件
r.Mount("/plugin", v2.serviceRelatePluginRouter())
@@ -383,6 +398,10 @@ func (v2 *V2) appRouter() chi.Router {
r.Post("/import", controller.GetManager().ImportApp)
r.Get("/import/{eventID}", controller.GetManager().ImportApp)
r.Delete("/import/{eventID}", controller.GetManager().ImportApp)
+
+ r.Post("/upload/events/{eventID}", controller.GetManager().UploadID)
+ r.Get("/upload/events/{eventID}", controller.GetManager().UploadID)
+ r.Delete("/upload/events/{eventID}", controller.GetManager().UploadID)
return r
}
diff --git a/api/api_routers/websocket/websocket.go b/api/api_routers/websocket/websocket.go
index 638bcefb8..62745523d 100644
--- a/api/api_routers/websocket/websocket.go
+++ b/api/api_routers/websocket/websocket.go
@@ -52,3 +52,11 @@ func AppRoutes() chi.Router {
r.Options("/upload/{eventID}", controller.GetManager().Upload)
return r
}
+
+//PackageBuildRoutes registers the routes for local package (file) uploads
+func PackageBuildRoutes() chi.Router {
+ r := chi.NewRouter()
+ r.Post("/component/events/{eventID}", controller.GetManager().UploadPackage)
+ r.Options("/component/events/{eventID}", controller.GetManager().UploadPackage)
+ return r
+}
diff --git a/api/controller/app.go b/api/controller/app.go
index 0a7495466..d105c22a1 100644
--- a/api/controller/app.go
+++ b/api/controller/app.go
@@ -156,6 +156,75 @@ func (a *AppStruct) ImportID(w http.ResponseWriter, r *http.Request) {
}
}
+//UploadID -
+func (a *AppStruct) UploadID(w http.ResponseWriter, r *http.Request) {
+ eventID := strings.TrimSpace(chi.URLParam(r, "eventID"))
+ if eventID == "" {
+ httputil.ReturnError(r, w, 400, "Failed to parse eventID.")
+ return
+ }
+ dirName := fmt.Sprintf("/grdata/package_build/temp/events/%s", eventID)
+
+ switch r.Method {
+ case "POST":
+ err := os.MkdirAll(dirName, 0755)
+ if err != nil {
+ httputil.ReturnError(r, w, 502, "Failed to create directory by event id: "+err.Error())
+ return
+ }
+ httputil.ReturnSuccess(r, w, map[string]string{"path": dirName})
+ case "GET":
+ _, err := os.Stat(dirName)
+ if err != nil {
+			if os.IsNotExist(err) {
+ err := os.MkdirAll(dirName, 0755)
+ if err != nil {
+ httputil.ReturnError(r, w, 502, "Failed to create directory by event id: "+err.Error())
+ return
+ }
+ }
+ }
+ packages, err := ioutil.ReadDir(dirName)
+ if err != nil {
+ httputil.ReturnSuccess(r, w, map[string][]string{"packages": {}})
+ return
+ }
+
+ packageArr := make([]string, 0, 10)
+ for _, dir := range packages {
+ if dir.IsDir() {
+ continue
+ }
+ ex := filepath.Ext(dir.Name())
+			if ex != ".jar" && ex != ".war" && ex != ".yaml" && ex != ".yml" {
+ continue
+ }
+ packageArr = append(packageArr, dir.Name())
+ }
+
+ httputil.ReturnSuccess(r, w, map[string][]string{"packages": packageArr})
+ case "DELETE":
+ cmd := exec.Command("rm", "-rf", dirName)
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := cmd.Run()
+ if err != nil && err.Error() != "exit status 1" {
+ logrus.Errorf("rm -rf %s failed: %s", dirName, err.Error())
+ httputil.ReturnError(r, w, 501, "Failed to delete directory by id: "+eventID)
+ return
+ }
+ res, err := db.GetManager().AppDao().GetByEventId(eventID)
+ if err != nil {
+ httputil.ReturnError(r, w, 404, fmt.Sprintf("Failed to query status of export app by event id %s: %v", eventID, err))
+ return
+ }
+ res.Status = "cleaned"
+		if err := db.GetManager().AppDao().UpdateModel(res); err != nil { logrus.Errorf("update app status by event id %s: %v", eventID, err) }
+ httputil.ReturnSuccess(r, w, "successful")
+ }
+}
+
+
//NewUpload -
func (a *AppStruct) NewUpload(w http.ResponseWriter, r *http.Request) {
eventID := strings.TrimSpace(chi.URLParam(r, "eventID"))
diff --git a/api/controller/cluster.go b/api/controller/cluster.go
index 9b3eef616..c19345b8e 100644
--- a/api/controller/cluster.go
+++ b/api/controller/cluster.go
@@ -19,6 +19,7 @@
package controller
import (
+ "github.com/goodrain/rainbond/api/model"
"net/http"
"github.com/go-chi/chi"
@@ -101,3 +102,169 @@ func (t *ClusterController) MavenSettingDetail(w http.ResponseWriter, r *http.Re
}
httputil.ReturnSuccess(r, w, c)
}
+
+//GetNamespace Get the unconnected namespaces under the current cluster
+func (t *ClusterController) GetNamespace(w http.ResponseWriter, r *http.Request) {
+ content := r.FormValue("content")
+ ns, err := handler.GetClusterHandler().GetNamespace(r.Context(), content)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, ns)
+}
+
+//GetNamespaceResource Get all resources in the current namespace
+func (t *ClusterController) GetNamespaceResource(w http.ResponseWriter, r *http.Request) {
+ content := r.FormValue("content")
+ namespace := r.FormValue("namespace")
+ rs, err := handler.GetClusterHandler().GetNamespaceSource(r.Context(), content, namespace)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, rs)
+}
+
+//ConvertResource Get the resources under the current namespace to the rainbond platform
+func (t *ClusterController) ConvertResource(w http.ResponseWriter, r *http.Request) {
+ content := r.FormValue("content")
+ namespace := r.FormValue("namespace")
+ rs, err := handler.GetClusterHandler().GetNamespaceSource(r.Context(), content, namespace)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ appsServices, err := handler.GetClusterHandler().ConvertResource(r.Context(), namespace, rs)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, appsServices)
+}
+
+//ResourceImport Import the converted k8s resources into recognition
+func (t *ClusterController) ResourceImport(w http.ResponseWriter, r *http.Request) {
+ content := r.FormValue("content")
+ namespace := r.FormValue("namespace")
+ eid := r.FormValue("eid")
+ rs, err := handler.GetClusterHandler().GetNamespaceSource(r.Context(), content, namespace)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ appsServices, err := handler.GetClusterHandler().ConvertResource(r.Context(), namespace, rs)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ rri, err := handler.GetClusterHandler().ResourceImport(namespace, appsServices, eid)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, rri)
+}
+
+//AddResource -
+func (t *ClusterController) AddResource(w http.ResponseWriter, r *http.Request) {
+ var hr model.AddHandleResource
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &hr, nil); !ok {
+ return
+ }
+ rri, err := handler.GetClusterHandler().AddAppK8SResource(r.Context(), hr.Namespace, hr.AppID, hr.ResourceYaml)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, rri)
+}
+
+//UpdateResource -
+func (t *ClusterController) UpdateResource(w http.ResponseWriter, r *http.Request) {
+ var hr model.HandleResource
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &hr, nil); !ok {
+ return
+ }
+ rri, err := handler.GetClusterHandler().UpdateAppK8SResource(r.Context(), hr.Namespace, hr.AppID, hr.Name, hr.ResourceYaml, hr.Kind)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, rri)
+}
+
+//DeleteResource -
+func (t *ClusterController) DeleteResource(w http.ResponseWriter, r *http.Request) {
+ var hr model.HandleResource
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &hr, nil); !ok {
+ return
+ }
+ err := handler.GetClusterHandler().DeleteAppK8SResource(r.Context(), hr.Namespace, hr.AppID, hr.Name, hr.ResourceYaml, hr.Kind)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, nil)
+}
+
+// SyncResource -
+func (t *ClusterController) SyncResource(w http.ResponseWriter, r *http.Request) {
+ var req model.SyncResources
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &req, nil); !ok {
+ return
+ }
+ resources, err := handler.GetClusterHandler().SyncAppK8SResources(r.Context(), &req)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, resources)
+}
+
+//YamlResourceName -
+func (t *ClusterController) YamlResourceName(w http.ResponseWriter, r *http.Request) {
+ var yr model.YamlResource
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &yr, nil); !ok {
+ return
+ }
+ h, err := handler.GetClusterHandler().AppYamlResourceName(yr)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, h)
+}
+
+//YamlResourceDetailed -
+func (t *ClusterController) YamlResourceDetailed(w http.ResponseWriter, r *http.Request) {
+ var yr model.YamlResource
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &yr, nil); !ok {
+ return
+ }
+ h, err := handler.GetClusterHandler().AppYamlResourceDetailed(yr, false)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, h)
+}
+
+//YamlResourceImport -
+func (t *ClusterController) YamlResourceImport(w http.ResponseWriter, r *http.Request) {
+ var yr model.YamlResource
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &yr, nil); !ok {
+ return
+ }
+ ar, err := handler.GetClusterHandler().AppYamlResourceDetailed(yr, true)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ ac, err := handler.GetClusterHandler().AppYamlResourceImport(yr, ar)
+ if err != nil {
+ err.Handle(r, w)
+ return
+ }
+ httputil.ReturnSuccess(r, w, ac)
+}
diff --git a/api/controller/k8s_attribute.go b/api/controller/k8s_attribute.go
new file mode 100644
index 000000000..623aec0a2
--- /dev/null
+++ b/api/controller/k8s_attribute.go
@@ -0,0 +1,86 @@
+// RAINBOND, Application Management Platform
+// Copyright (C) 2022-2022 Goodrain Co., Ltd.
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version. For any non-GPL usage of Rainbond,
+// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
+// must be obtained first.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+package controller
+
+import (
+ "fmt"
+ "github.com/goodrain/rainbond/api/handler"
+ api_model "github.com/goodrain/rainbond/api/model"
+ ctxutil "github.com/goodrain/rainbond/api/util/ctx"
+ httputil "github.com/goodrain/rainbond/util/http"
+ "net/http"
+)
+
+// K8sAttributeController -
+type K8sAttributeController struct{}
+
+// K8sAttributes -
+func (k *K8sAttributeController) K8sAttributes(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ case "POST":
+ k.createK8sAttributes(w, r)
+ case "PUT":
+ k.updateK8sAttributes(w, r)
+ case "DELETE":
+ k.deleteK8sAttributes(w, r)
+ }
+}
+
+func (k *K8sAttributeController) createK8sAttributes(w http.ResponseWriter, r *http.Request) {
+ tenantID := r.Context().Value(ctxutil.ContextKey("tenant_id")).(string)
+ componentID := r.Context().Value(ctxutil.ContextKey("service_id")).(string)
+ var k8sAttr api_model.ComponentK8sAttribute
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &k8sAttr, nil); !ok {
+ httputil.ReturnBcodeError(r, w, fmt.Errorf("k8s attributes is not valid"))
+ return
+ }
+ if err := handler.GetServiceManager().CreateK8sAttribute(tenantID, componentID, &k8sAttr); err != nil {
+ httputil.ReturnBcodeError(r, w, err)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+}
+
+func (k *K8sAttributeController) updateK8sAttributes(w http.ResponseWriter, r *http.Request) {
+ componentID := r.Context().Value(ctxutil.ContextKey("service_id")).(string)
+ var k8sAttr api_model.ComponentK8sAttribute
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &k8sAttr, nil); !ok {
+ httputil.ReturnBcodeError(r, w, fmt.Errorf("k8s attributes is not valid"))
+ return
+ }
+ if err := handler.GetServiceManager().UpdateK8sAttribute(componentID, &k8sAttr); err != nil {
+ httputil.ReturnBcodeError(r, w, err)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+}
+
+func (k *K8sAttributeController) deleteK8sAttributes(w http.ResponseWriter, r *http.Request) {
+ componentID := r.Context().Value(ctxutil.ContextKey("service_id")).(string)
+ var req api_model.DeleteK8sAttributeReq
+ if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &req, nil); !ok {
+ httputil.ReturnBcodeError(r, w, fmt.Errorf("k8s attributes is not valid"))
+ return
+ }
+ if err := handler.GetServiceManager().DeleteK8sAttribute(componentID, req.Name); err != nil {
+ httputil.ReturnBcodeError(r, w, err)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+}
diff --git a/api/controller/resources.go b/api/controller/resources.go
index 5a1026307..c3cc7e401 100644
--- a/api/controller/resources.go
+++ b/api/controller/resources.go
@@ -61,6 +61,7 @@ type V2Routes struct {
PodController
ApplicationController
RegistryAuthSecretStruct
+ K8sAttributeController
}
//Show test
@@ -733,6 +734,7 @@ func (t *TenantStruct) UpdateService(w http.ResponseWriter, r *http.Request) {
"extend_method": []string{},
"app_id": []string{},
"k8s_component_name": []string{},
+ "job_strategy": []string{},
}
data, ok := httputil.ValidatorRequestMapAndErrorResponse(r, w, rules, nil)
if !ok {
diff --git a/api/controller/service_monitor.go b/api/controller/service_monitor.go
index d4a37334c..1aa495b31 100644
--- a/api/controller/service_monitor.go
+++ b/api/controller/service_monitor.go
@@ -1,7 +1,12 @@
package controller
import (
+ "fmt"
+ "github.com/sirupsen/logrus"
+ "io"
"net/http"
+ "os"
+ "strings"
"github.com/goodrain/rainbond/api/client/prometheus"
@@ -60,6 +65,59 @@ func (t *TenantStruct) UpdateServiceMonitors(w http.ResponseWriter, r *http.Requ
httputil.ReturnSuccess(r, w, tsm)
}
+//UploadPackage upload package
+func (t *TenantStruct) UploadPackage(w http.ResponseWriter, r *http.Request) {
+ eventID := strings.TrimSpace(chi.URLParam(r, "eventID"))
+ switch r.Method {
+ case "POST":
+ if eventID == "" {
+ httputil.ReturnError(r, w, 400, "Failed to parse eventID.")
+ return
+ }
+ logrus.Debug("Start receive upload file: ", eventID)
+ reader, header, err := r.FormFile("packageTarFile")
+ if err != nil {
+ logrus.Errorf("Failed to parse upload file: %s", err.Error())
+ httputil.ReturnError(r, w, 501, "Failed to parse upload file.")
+ return
+ }
+ defer reader.Close()
+
+ dirName := fmt.Sprintf("/grdata/package_build/temp/events/%s", eventID)
+ os.MkdirAll(dirName, 0755)
+
+ fileName := fmt.Sprintf("%s/%s", dirName, header.Filename)
+ file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ logrus.Errorf("Failed to open file: %s", err.Error())
+			httputil.ReturnError(r, w, 502, "Failed to open file: "+err.Error()); return
+ }
+ defer file.Close()
+
+ logrus.Debug("Start write file to: ", fileName)
+ if _, err := io.Copy(file, reader); err != nil {
+ logrus.Errorf("Failed to write file:%s", err.Error())
+			httputil.ReturnError(r, w, 503, "Failed to write file: "+err.Error()); return
+ }
+
+ logrus.Debug("successful write file to: ", fileName)
+ origin := r.Header.Get("Origin")
+ w.Header().Add("Access-Control-Allow-Origin", origin)
+ w.Header().Add("Access-Control-Allow-Methods", "POST,OPTIONS")
+ w.Header().Add("Access-Control-Allow-Credentials", "true")
+ w.Header().Add("Access-Control-Allow-Headers", "x-requested-with,Content-Type,X-Custom-Header")
+ httputil.ReturnSuccess(r, w, nil)
+
+ case "OPTIONS":
+ origin := r.Header.Get("Origin")
+ w.Header().Add("Access-Control-Allow-Origin", origin)
+ w.Header().Add("Access-Control-Allow-Methods", "POST,OPTIONS")
+ w.Header().Add("Access-Control-Allow-Credentials", "true")
+ w.Header().Add("Access-Control-Allow-Headers", "x-requested-with,Content-Type,X-Custom-Header")
+ httputil.ReturnSuccess(r, w, nil)
+ }
+}
+
//GetMonitorMetrics get monitor metrics
func GetMonitorMetrics(w http.ResponseWriter, r *http.Request) {
target := r.FormValue("target")
diff --git a/api/handler/application_handler.go b/api/handler/application_handler.go
index 48dc8c41c..dfc7a7552 100644
--- a/api/handler/application_handler.go
+++ b/api/handler/application_handler.go
@@ -646,6 +646,9 @@ func (a *ApplicationAction) SyncComponents(app *dbmodel.Application, components
if err := GetServiceManager().SyncComponentEndpoints(tx, components); err != nil {
return err
}
+ if err := GetServiceManager().SyncComponentK8sAttributes(tx, app, components); err != nil {
+ return err
+ }
if len(deleteComponentIDs) != 0 {
return a.deleteByComponentIDs(tx, app, deleteComponentIDs)
}
@@ -719,7 +722,10 @@ func (a *ApplicationAction) deleteByComponentIDs(tx *gorm.DB, app *dbmodel.Appli
if err = db.GetManager().TenantServceAutoscalerRulesDaoTransactions(tx).DeleteByComponentIDs(componentIDs); err != nil {
return err
}
- return db.GetManager().TenantServceAutoscalerRuleMetricsDaoTransactions(tx).DeleteByRuleIDs(autoScaleRuleIDs)
+ if err = db.GetManager().TenantServceAutoscalerRuleMetricsDaoTransactions(tx).DeleteByRuleIDs(autoScaleRuleIDs); err != nil {
+ return err
+ }
+ return db.GetManager().ComponentK8sAttributeDaoTransactions(tx).DeleteByComponentIDs(componentIDs)
}
// ListAppStatuses -
diff --git a/api/handler/cluster.go b/api/handler/cluster.go
index db557f03a..c9f577772 100644
--- a/api/handler/cluster.go
+++ b/api/handler/cluster.go
@@ -3,20 +3,24 @@ package handler
import (
"context"
"fmt"
- "os"
- "runtime"
- "strconv"
- "time"
-
"github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/api/util"
+ dbmodel "github.com/goodrain/rainbond/db/model"
+ "github.com/goodrain/rainbond/util/constants"
"github.com/shirou/gopsutil/disk"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
)
// ClusterHandler -
@@ -27,13 +31,26 @@ type ClusterHandler interface {
MavenSettingUpdate(ctx context.Context, ms *MavenSetting) *util.APIHandleError
MavenSettingDelete(ctx context.Context, name string) *util.APIHandleError
MavenSettingDetail(ctx context.Context, name string) (*MavenSetting, *util.APIHandleError)
+ GetNamespace(ctx context.Context, content string) ([]string, *util.APIHandleError)
+ GetNamespaceSource(ctx context.Context, content string, namespace string) (map[string]model.LabelResource, *util.APIHandleError)
+ ConvertResource(ctx context.Context, namespace string, lr map[string]model.LabelResource) (map[string]model.ApplicationResource, *util.APIHandleError)
+ ResourceImport(namespace string, as map[string]model.ApplicationResource, eid string) (*model.ReturnResourceImport, *util.APIHandleError)
+ AddAppK8SResource(ctx context.Context, namespace string, appID string, resourceYaml string) ([]*dbmodel.K8sResource, *util.APIHandleError)
+ DeleteAppK8SResource(ctx context.Context, namespace, appID, name, yaml, kind string) *util.APIHandleError
+ UpdateAppK8SResource(ctx context.Context, namespace, appID, name, resourceYaml, kind string) (dbmodel.K8sResource, *util.APIHandleError)
+ SyncAppK8SResources(ctx context.Context, resources *model.SyncResources) ([]*dbmodel.K8sResource, *util.APIHandleError)
+ AppYamlResourceName(yamlResource model.YamlResource) (map[string]model.LabelResource, *util.APIHandleError)
+ AppYamlResourceDetailed(yamlResource model.YamlResource, yamlImport bool) (model.ApplicationResource, *util.APIHandleError)
+ AppYamlResourceImport(yamlResource model.YamlResource, components model.ApplicationResource) (model.AppComponent, *util.APIHandleError)
}
// NewClusterHandler -
-func NewClusterHandler(clientset *kubernetes.Clientset, RbdNamespace string) ClusterHandler {
+func NewClusterHandler(clientset *kubernetes.Clientset, RbdNamespace string, config *rest.Config, mapper meta.RESTMapper) ClusterHandler {
return &clusterAction{
namespace: RbdNamespace,
clientset: clientset,
+ config: config,
+ mapper: mapper,
}
}
@@ -42,8 +59,11 @@ type clusterAction struct {
clientset *kubernetes.Clientset
clusterInfoCache *model.ClusterResource
cacheTime time.Time
+ config *rest.Config
+ mapper meta.RESTMapper
}
+//GetClusterInfo -
func (c *clusterAction) GetClusterInfo(ctx context.Context) (*model.ClusterResource, error) {
timeout, _ := strconv.Atoi(os.Getenv("CLUSTER_INFO_CACHE_TIME"))
if timeout == 0 {
@@ -332,3 +352,34 @@ func (c *clusterAction) MavenSettingDetail(ctx context.Context, name string) (*M
Content: sm.Data["mavensetting"],
}, nil
}
+
+//GetNamespace Get namespace of the current cluster
+func (c *clusterAction) GetNamespace(ctx context.Context, content string) ([]string, *util.APIHandleError) {
+ namespaceList, err := c.clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to get namespace:%v", err)}
+ }
+	var namespaces []string
+	for _, ns := range namespaceList.Items {
+		if strings.HasPrefix(ns.Name, "kube-") || ns.Name == "rainbond" || ns.Name == "rbd-system" {
+			continue
+		}
+		if labelValue, isRBDNamespace := ns.Labels[constants.ResourceManagedByLabel]; isRBDNamespace && labelValue == "rainbond" && content == "unmanaged" {
+			continue
+		}
+		namespaces = append(namespaces, ns.Name)
+	}
+	return namespaces, nil
+}
+
+//MergeMap merges map1 into map2, concatenating value slices for duplicate keys
+func MergeMap(map1 map[string][]string, map2 map[string][]string) map[string][]string {
+ for k, v := range map1 {
+ if _, ok := map2[k]; ok {
+ map2[k] = append(map2[k], v...)
+ continue
+ }
+ map2[k] = v
+ }
+ return map2
+}
diff --git a/api/handler/covert_resource.go b/api/handler/covert_resource.go
new file mode 100644
index 000000000..9add4c9c9
--- /dev/null
+++ b/api/handler/covert_resource.go
@@ -0,0 +1,493 @@
+package handler
+
+import (
+ "fmt"
+ "github.com/goodrain/rainbond/api/model"
+ "github.com/goodrain/rainbond/api/util"
+ dbmodel "github.com/goodrain/rainbond/db/model"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+ v1 "k8s.io/api/autoscaling/v1"
+ corev1 "k8s.io/api/core/v1"
+ networkingv1 "k8s.io/api/networking/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "strings"
+)
+
+//ConvertResource 处理资源
+func (c *clusterAction) ConvertResource(ctx context.Context, namespace string, lr map[string]model.LabelResource) (map[string]model.ApplicationResource, *util.APIHandleError) {
+ logrus.Infof("ConvertResource function begin")
+ appsServices := make(map[string]model.ApplicationResource)
+ for label, resource := range lr {
+ c.workloadHandle(ctx, appsServices, resource, namespace, label)
+ }
+ logrus.Infof("ConvertResource function end")
+ return appsServices, nil
+}
+
+func (c *clusterAction) workloadHandle(ctx context.Context, cr map[string]model.ApplicationResource, lr model.LabelResource, namespace string, label string) {
+ app := label
+ deployResource := c.workloadDeployments(lr.Workloads.Deployments, namespace)
+ sfsResource := c.workloadStateFulSets(lr.Workloads.StateFulSets, namespace)
+ jobResource := c.workloadJobs(lr.Workloads.Jobs, namespace)
+ cjResource := c.workloadCronJobs(lr.Workloads.CronJobs, namespace)
+ convertResource := append(append(append(deployResource, sfsResource...), jobResource...), cjResource...)
+ k8sResources := c.getAppKubernetesResources(ctx, lr.Others, namespace)
+ cr[app] = model.ApplicationResource{
+ ConvertResource: convertResource,
+ KubernetesResources: k8sResources,
+ }
+}
+
+func (c *clusterAction) workloadDeployments(dmNames []string, namespace string) []model.ConvertResource {
+ var componentsCR []model.ConvertResource
+ for _, dmName := range dmNames {
+ resources, err := c.clientset.AppsV1().Deployments(namespace).Get(context.Background(), dmName, metav1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Deployment %v:%v", dmName, err)
+ return nil
+ }
+
+ //BasicManagement
+ basic := model.BasicManagement{
+ ResourceType: model.Deployment,
+ Replicas: resources.Spec.Replicas,
+ Memory: resources.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+ CPU: resources.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+ Image: resources.Spec.Template.Spec.Containers[0].Image,
+ Cmd: strings.Join(append(resources.Spec.Template.Spec.Containers[0].Command, resources.Spec.Template.Spec.Containers[0].Args...), " "),
+ }
+ parameter := model.YamlResourceParameter{
+ ComponentsCR: &componentsCR,
+ Basic: basic,
+ Template: resources.Spec.Template,
+ Namespace: namespace,
+ Name: dmName,
+ RsLabel: resources.Labels,
+ }
+ c.PodTemplateSpecResource(parameter)
+ }
+ return componentsCR
+}
+
+func (c *clusterAction) workloadStateFulSets(sfsNames []string, namespace string) []model.ConvertResource {
+ var componentsCR []model.ConvertResource
+ for _, sfsName := range sfsNames {
+ resources, err := c.clientset.AppsV1().StatefulSets(namespace).Get(context.Background(), sfsName, metav1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get StatefulSet %v:%v", sfsName, err)
+ return nil
+ }
+
+ //BasicManagement
+ basic := model.BasicManagement{
+ ResourceType: model.StateFulSet,
+ Replicas: resources.Spec.Replicas,
+ Memory: resources.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+ CPU: resources.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+ Image: resources.Spec.Template.Spec.Containers[0].Image,
+ Cmd: strings.Join(append(resources.Spec.Template.Spec.Containers[0].Command, resources.Spec.Template.Spec.Containers[0].Args...), " "),
+ }
+ parameter := model.YamlResourceParameter{
+ ComponentsCR: &componentsCR,
+ Basic: basic,
+ Template: resources.Spec.Template,
+ Namespace: namespace,
+ Name: sfsName,
+ RsLabel: resources.Labels,
+ }
+ c.PodTemplateSpecResource(parameter)
+ }
+ return componentsCR
+}
+
+func (c *clusterAction) workloadJobs(jobNames []string, namespace string) []model.ConvertResource {
+ var componentsCR []model.ConvertResource
+ for _, jobName := range jobNames {
+ resources, err := c.clientset.BatchV1().Jobs(namespace).Get(context.Background(), jobName, metav1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Job %v:%v", jobName, err)
+ return nil
+ }
+ var BackoffLimit, Parallelism, ActiveDeadlineSeconds, Completions string
+ if resources.Spec.BackoffLimit != nil {
+ BackoffLimit = fmt.Sprintf("%v", *resources.Spec.BackoffLimit)
+ }
+ if resources.Spec.Parallelism != nil {
+ Parallelism = fmt.Sprintf("%v", *resources.Spec.Parallelism)
+ }
+ if resources.Spec.ActiveDeadlineSeconds != nil {
+ ActiveDeadlineSeconds = fmt.Sprintf("%v", *resources.Spec.ActiveDeadlineSeconds)
+ }
+ if resources.Spec.Completions != nil {
+ Completions = fmt.Sprintf("%v", *resources.Spec.Completions)
+ }
+ job := model.JobStrategy{
+ Schedule: resources.Spec.Template.Spec.SchedulerName,
+ BackoffLimit: BackoffLimit,
+ Parallelism: Parallelism,
+ ActiveDeadlineSeconds: ActiveDeadlineSeconds,
+ Completions: Completions,
+ }
+ //BasicManagement
+ basic := model.BasicManagement{
+ ResourceType: model.Job,
+ Replicas: resources.Spec.Completions,
+ Memory: resources.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+ CPU: resources.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+ Image: resources.Spec.Template.Spec.Containers[0].Image,
+ Cmd: strings.Join(append(resources.Spec.Template.Spec.Containers[0].Command, resources.Spec.Template.Spec.Containers[0].Args...), " "),
+ JobStrategy: job,
+ }
+ parameter := model.YamlResourceParameter{
+ ComponentsCR: &componentsCR,
+ Basic: basic,
+ Template: resources.Spec.Template,
+ Namespace: namespace,
+ Name: jobName,
+ RsLabel: resources.Labels,
+ }
+ c.PodTemplateSpecResource(parameter)
+ }
+ return componentsCR
+}
+
+func (c *clusterAction) workloadCronJobs(cjNames []string, namespace string) []model.ConvertResource {
+ var componentsCR []model.ConvertResource
+ for _, cjName := range cjNames {
+ resources, err := c.clientset.BatchV1beta1().CronJobs(namespace).Get(context.Background(), cjName, metav1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get CronJob %v:%v", cjName, err)
+ return nil
+ }
+ BackoffLimit, Parallelism, ActiveDeadlineSeconds, Completions := "", "", "", ""
+ if resources.Spec.JobTemplate.Spec.BackoffLimit != nil {
+ BackoffLimit = fmt.Sprintf("%v", *resources.Spec.JobTemplate.Spec.BackoffLimit)
+ }
+ if resources.Spec.JobTemplate.Spec.Parallelism != nil {
+ Parallelism = fmt.Sprintf("%v", *resources.Spec.JobTemplate.Spec.Parallelism)
+ }
+ if resources.Spec.JobTemplate.Spec.ActiveDeadlineSeconds != nil {
+ ActiveDeadlineSeconds = fmt.Sprintf("%v", *resources.Spec.JobTemplate.Spec.ActiveDeadlineSeconds)
+ }
+ if resources.Spec.JobTemplate.Spec.Completions != nil {
+ Completions = fmt.Sprintf("%v", *resources.Spec.JobTemplate.Spec.Completions)
+ }
+ job := model.JobStrategy{
+ Schedule: resources.Spec.Schedule,
+ BackoffLimit: BackoffLimit,
+ Parallelism: Parallelism,
+ ActiveDeadlineSeconds: ActiveDeadlineSeconds,
+ Completions: Completions,
+ }
+ //BasicManagement
+ basic := model.BasicManagement{
+ ResourceType: model.CronJob,
+ Replicas: resources.Spec.JobTemplate.Spec.Completions,
+ Memory: resources.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+ CPU: resources.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+ Image: resources.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image,
+ Cmd: strings.Join(append(resources.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command, resources.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Args...), " "),
+ JobStrategy: job,
+ }
+ parameter := model.YamlResourceParameter{
+ ComponentsCR: &componentsCR,
+ Basic: basic,
+ Template: resources.Spec.JobTemplate.Spec.Template,
+ Namespace: namespace,
+ Name: cjName,
+ RsLabel: resources.Labels,
+ }
+ c.PodTemplateSpecResource(parameter)
+ }
+ return componentsCR
+}
+
+func (c *clusterAction) getAppKubernetesResources(ctx context.Context, others model.OtherResource, namespace string) []dbmodel.K8sResource {
+ var k8sResources []dbmodel.K8sResource
+ servicesMap := make(map[string]corev1.Service)
+ servicesList, err := c.clientset.CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get services error:%v", namespace, err)
+ }
+ if len(others.Services) != 0 && err == nil {
+ for _, services := range servicesList.Items {
+ servicesMap[services.Name] = services
+ }
+ for _, servicesName := range others.Services {
+ services, _ := servicesMap[servicesName]
+ services.Kind = model.Service
+ services.Status = corev1.ServiceStatus{}
+ services.APIVersion = "v1"
+ services.ManagedFields = []metav1.ManagedFieldsEntry{}
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", services)
+ if err != nil {
+ logrus.Errorf("namespace:%v service:%v error: %v", namespace, services.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: services.Name,
+ Kind: model.Service,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ pvcMap := make(map[string]corev1.PersistentVolumeClaim)
+ pvcList, err := c.clientset.CoreV1().PersistentVolumeClaims(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get pvc error:%v", namespace, err)
+ }
+ if len(others.PVC) != 0 && err == nil {
+ for _, pvc := range pvcList.Items {
+ pvcMap[pvc.Name] = pvc
+ }
+ for _, pvcName := range others.PVC {
+ pvc, _ := pvcMap[pvcName]
+ pvc.Status = corev1.PersistentVolumeClaimStatus{}
+ pvc.ManagedFields = []metav1.ManagedFieldsEntry{}
+ pvc.Kind = model.PVC
+ pvc.APIVersion = "v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", pvc)
+ if err != nil {
+ logrus.Errorf("namespace:%v pvc:%v error: %v", namespace, pvc.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: pvc.Name,
+ Kind: model.PVC,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ ingressMap := make(map[string]networkingv1.Ingress)
+ ingressList, err := c.clientset.NetworkingV1().Ingresses(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get ingresses error:%v", namespace, err)
+ }
+ if len(others.Ingresses) != 0 && err == nil {
+ for _, ingress := range ingressList.Items {
+ ingressMap[ingress.Name] = ingress
+ }
+ for _, ingressName := range others.Ingresses {
+ ingresses, _ := ingressMap[ingressName]
+ ingresses.Status = networkingv1.IngressStatus{}
+ ingresses.ManagedFields = []metav1.ManagedFieldsEntry{}
+ ingresses.Kind = model.Ingress
+ ingresses.APIVersion = "networking.k8s.io/v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", ingresses)
+ if err != nil {
+ logrus.Errorf("namespace:%v ingresses:%v error: %v", namespace, ingresses.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: ingresses.Name,
+ Kind: model.Ingress,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ networkPoliciesMap := make(map[string]networkingv1.NetworkPolicy)
+ networkPoliciesList, err := c.clientset.NetworkingV1().NetworkPolicies(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get NetworkPolicies error:%v", namespace, err)
+ }
+ if len(others.NetworkPolicies) != 0 && err == nil {
+ for _, networkPolicies := range networkPoliciesList.Items {
+ networkPoliciesMap[networkPolicies.Name] = networkPolicies
+ }
+ for _, networkPoliciesName := range others.NetworkPolicies {
+ networkPolicies, _ := networkPoliciesMap[networkPoliciesName]
+ networkPolicies.ManagedFields = []metav1.ManagedFieldsEntry{}
+ networkPolicies.Kind = model.NetworkPolicy
+ networkPolicies.APIVersion = "networking.k8s.io/v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", networkPolicies)
+ if err != nil {
+ logrus.Errorf("namespace:%v NetworkPolicies:%v error: %v", namespace, networkPolicies.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: networkPolicies.Name,
+ Kind: model.NetworkPolicy,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ cmMap := make(map[string]corev1.ConfigMap)
+ cmList, err := c.clientset.CoreV1().ConfigMaps(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get ConfigMaps error:%v", namespace, err)
+ }
+ if len(others.ConfigMaps) != 0 && err == nil {
+ for _, cm := range cmList.Items {
+ cmMap[cm.Name] = cm
+ }
+ for _, configMapsName := range others.ConfigMaps {
+ configMaps, _ := cmMap[configMapsName]
+ configMaps.ManagedFields = []metav1.ManagedFieldsEntry{}
+ configMaps.Kind = model.ConfigMap
+ configMaps.APIVersion = "v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", configMaps)
+ if err != nil {
+ logrus.Errorf("namespace:%v ConfigMaps:%v error: %v", namespace, configMaps.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: configMaps.Name,
+ Kind: model.ConfigMap,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ secretsMap := make(map[string]corev1.Secret)
+ secretsList, err := c.clientset.CoreV1().Secrets(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get Secrets error:%v", namespace, err)
+ }
+ if len(others.Secrets) != 0 && err == nil {
+ for _, secrets := range secretsList.Items {
+ secretsMap[secrets.Name] = secrets
+ }
+ for _, secretsName := range others.Secrets {
+ secrets, _ := secretsMap[secretsName]
+ secrets.ManagedFields = []metav1.ManagedFieldsEntry{}
+ secrets.Kind = model.Secret
+ secrets.APIVersion = "v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", secrets)
+ if err != nil {
+ logrus.Errorf("namespace:%v Secrets:%v error: %v", namespace, secrets.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: secrets.Name,
+ Kind: model.Secret,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ serviceAccountsMap := make(map[string]corev1.ServiceAccount)
+ serviceAccountsList, err := c.clientset.CoreV1().ServiceAccounts(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get ServiceAccounts error:%v", namespace, err)
+ }
+ if len(others.ServiceAccounts) != 0 && err == nil {
+ for _, serviceAccounts := range serviceAccountsList.Items {
+ serviceAccountsMap[serviceAccounts.Name] = serviceAccounts
+ }
+ for _, serviceAccountsName := range others.ServiceAccounts {
+ serviceAccounts, _ := serviceAccountsMap[serviceAccountsName]
+ serviceAccounts.ManagedFields = []metav1.ManagedFieldsEntry{}
+ serviceAccounts.Kind = model.ServiceAccount
+ serviceAccounts.APIVersion = "v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", serviceAccounts)
+ if err != nil {
+ logrus.Errorf("namespace:%v ServiceAccounts:%v error: %v", namespace, serviceAccounts.Name, err)
+ continue
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: serviceAccounts.Name,
+ Kind: model.ServiceAccount,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ roleBindingsMap := make(map[string]rbacv1.RoleBinding)
+ roleBindingsList, err := c.clientset.RbacV1().RoleBindings(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get RoleBindings error:%v", namespace, err)
+ }
+ if len(others.RoleBindings) != 0 && err == nil {
+ for _, roleBindings := range roleBindingsList.Items {
+ roleBindingsMap[roleBindings.Name] = roleBindings
+ }
+ for _, roleBindingsName := range others.RoleBindings {
+ roleBindings, _ := roleBindingsMap[roleBindingsName]
+ roleBindings.ManagedFields = []metav1.ManagedFieldsEntry{}
+ roleBindings.Kind = model.RoleBinding
+ roleBindings.APIVersion = "rbac.authorization.k8s.io/v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", roleBindings)
+ if err != nil {
+ logrus.Errorf("namespace:%v RoleBindings:%v error: %v", namespace, roleBindings.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: roleBindings.Name,
+ Kind: model.RoleBinding,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ hpaMap := make(map[string]v1.HorizontalPodAutoscaler)
+ hpaList, err := c.clientset.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get HorizontalPodAutoscalers error:%v", namespace, err)
+ }
+ if len(others.HorizontalPodAutoscalers) != 0 && err == nil {
+ for _, hpa := range hpaList.Items {
+ hpaMap[hpa.Name] = hpa
+ }
+ for _, hpaName := range others.HorizontalPodAutoscalers {
+ hpa, _ := hpaMap[hpaName]
+ hpa.Status = v1.HorizontalPodAutoscalerStatus{}
+ hpa.ManagedFields = []metav1.ManagedFieldsEntry{}
+ hpa.Kind = model.HorizontalPodAutoscaler
+ hpa.APIVersion = "autoscaling/v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", hpa)
+ if err != nil {
+ logrus.Errorf("namespace:%v HorizontalPodAutoscalers:%v error: %v", namespace, hpa.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: hpa.Name,
+ Kind: model.HorizontalPodAutoscaler,
+ Content: kubernetesResourcesYAML,
+ State: 1,
+ ErrorOverview: "创建成功",
+ })
+ }
+ }
+
+ rolesMap := make(map[string]rbacv1.Role)
+ rolesList, err := c.clientset.RbacV1().Roles(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("namespace:%v get roles error:%v", namespace, err)
+ }
+ if len(others.Roles) != 0 && err == nil {
+ for _, roles := range rolesList.Items {
+ rolesMap[roles.Name] = roles
+ }
+ for _, rolesName := range others.Roles {
+ roles, _ := rolesMap[rolesName]
+ roles.Kind = model.Role
+ roles.APIVersion = "rbac.authorization.k8s.io/v1"
+ kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", roles)
+ if err != nil {
+ logrus.Errorf("namespace:%v roles:%v error: %v", namespace, roles.Name, err)
+ }
+ k8sResources = append(k8sResources, dbmodel.K8sResource{
+ Name: roles.Name,
+ Kind: model.Role,
+ Content: kubernetesResourcesYAML,
+ ErrorOverview: "创建成功",
+ State: 1,
+ })
+ }
+ }
+ return k8sResources
+}
diff --git a/api/handler/get_namespace_resource_name.go b/api/handler/get_namespace_resource_name.go
new file mode 100644
index 000000000..45b2552a1
--- /dev/null
+++ b/api/handler/get_namespace_resource_name.go
@@ -0,0 +1,530 @@
+package handler
+
+import (
+ "github.com/goodrain/rainbond/api/model"
+ "github.com/goodrain/rainbond/api/util"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+//GetNamespaceSource Get all resources in the current namespace
+func (c *clusterAction) GetNamespaceSource(ctx context.Context, content string, namespace string) (map[string]model.LabelResource, *util.APIHandleError) {
+ logrus.Infof("GetNamespaceSource function begin")
+ //存储workloads们的ConfigMap
+ cmsMap := make(map[string][]string)
+ //存储workloads们的secrets
+ secretsMap := make(map[string][]string)
+ deployments, cmMap, secretMap := c.getResourceName(ctx, namespace, content, model.Deployment)
+ if len(cmMap) != 0 {
+ cmsMap = MergeMap(cmMap, cmsMap)
+ }
+ if len(secretMap) != 0 {
+ secretsMap = MergeMap(secretMap, secretsMap)
+ }
+ jobs, cmMap, secretMap := c.getResourceName(ctx, namespace, content, model.Job)
+ if len(cmMap) != 0 {
+ cmsMap = MergeMap(cmMap, cmsMap)
+ }
+ if len(secretMap) != 0 {
+ secretsMap = MergeMap(secretMap, secretsMap)
+ }
+ cronJobs, cmMap, secretMap := c.getResourceName(ctx, namespace, content, model.CronJob)
+ if len(cmMap) != 0 {
+ cmsMap = MergeMap(cmMap, cmsMap)
+ }
+ if len(secretMap) != 0 {
+ secretsMap = MergeMap(secretMap, secretsMap)
+ }
+ stateFulSets, cmMap, secretMap := c.getResourceName(ctx, namespace, content, model.StateFulSet)
+ if len(cmMap) != 0 {
+ cmsMap = MergeMap(cmMap, cmsMap)
+ }
+ if len(secretMap) != 0 {
+ secretsMap = MergeMap(secretMap, secretsMap)
+ }
+ processWorkloads := model.LabelWorkloadsResourceProcess{
+ Deployments: deployments,
+ Jobs: jobs,
+ CronJobs: cronJobs,
+ StateFulSets: stateFulSets,
+ }
+ services, _, _ := c.getResourceName(ctx, namespace, content, model.Service)
+ pvc, _, _ := c.getResourceName(ctx, namespace, content, model.PVC)
+ ingresses, _, _ := c.getResourceName(ctx, namespace, content, model.Ingress)
+ networkPolicies, _, _ := c.getResourceName(ctx, namespace, content, model.NetworkPolicy)
+ cms, _, _ := c.getResourceName(ctx, namespace, content, model.ConfigMap)
+ secrets, _, _ := c.getResourceName(ctx, namespace, content, model.Secret)
+ serviceAccounts, _, _ := c.getResourceName(ctx, namespace, content, model.ServiceAccount)
+ roleBindings, _, _ := c.getResourceName(ctx, namespace, content, model.RoleBinding)
+ horizontalPodAutoscalers, _, _ := c.getResourceName(ctx, namespace, content, model.HorizontalPodAutoscaler)
+ roles, _, _ := c.getResourceName(ctx, namespace, content, model.Role)
+ processOthers := model.LabelOthersResourceProcess{
+ Services: services,
+ PVC: pvc,
+ Ingresses: ingresses,
+ NetworkPolicies: networkPolicies,
+ ConfigMaps: MergeMap(cmsMap, cms),
+ Secrets: MergeMap(secretsMap, secrets),
+ ServiceAccounts: serviceAccounts,
+ RoleBindings: roleBindings,
+ HorizontalPodAutoscalers: horizontalPodAutoscalers,
+ Roles: roles,
+ }
+ labelResource := resourceProcessing(processWorkloads, processOthers)
+ logrus.Infof("GetNamespaceSource function end")
+ return labelResource, nil
+}
+
+//resourceProcessing 将处理好的资源类型数据格式再加工成可作为返回值的数据。
+func resourceProcessing(processWorkloads model.LabelWorkloadsResourceProcess, processOthers model.LabelOthersResourceProcess) map[string]model.LabelResource {
+ labelResource := make(map[string]model.LabelResource)
+ for label, deployments := range processWorkloads.Deployments {
+ if val, ok := labelResource[label]; ok {
+ val.Workloads.Deployments = deployments
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Workloads: model.WorkLoadsResource{
+ Deployments: deployments,
+ },
+ }
+ }
+ for label, jobs := range processWorkloads.Jobs {
+ if val, ok := labelResource[label]; ok {
+ val.Workloads.Jobs = jobs
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Workloads: model.WorkLoadsResource{
+ Jobs: jobs,
+ },
+ }
+
+ }
+ for label, cronJobs := range processWorkloads.CronJobs {
+ if val, ok := labelResource[label]; ok {
+ val.Workloads.CronJobs = cronJobs
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Workloads: model.WorkLoadsResource{
+ CronJobs: cronJobs,
+ },
+ }
+ }
+ for label, stateFulSets := range processWorkloads.StateFulSets {
+ if val, ok := labelResource[label]; ok {
+ val.Workloads.StateFulSets = stateFulSets
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Workloads: model.WorkLoadsResource{
+ StateFulSets: stateFulSets,
+ },
+ }
+ }
+ for label, service := range processOthers.Services {
+ if val, ok := labelResource[label]; ok {
+ val.Others.Services = service
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ Services: service,
+ },
+ }
+
+ }
+ for label, pvc := range processOthers.PVC {
+ if val, ok := labelResource[label]; ok {
+ val.Others.PVC = pvc
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ PVC: pvc,
+ },
+ }
+
+ }
+ for label, ingresses := range processOthers.Ingresses {
+ if val, ok := labelResource[label]; ok {
+ val.Others.Ingresses = ingresses
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ Ingresses: ingresses,
+ },
+ }
+ }
+ for label, networkPolicies := range processOthers.NetworkPolicies {
+ if val, ok := labelResource[label]; ok {
+ val.Others.NetworkPolicies = networkPolicies
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ NetworkPolicies: networkPolicies,
+ },
+ }
+ }
+ for label, configMaps := range processOthers.ConfigMaps {
+ if val, ok := labelResource[label]; ok {
+ val.Others.ConfigMaps = configMaps
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ ConfigMaps: configMaps,
+ },
+ }
+ }
+ for label, secrets := range processOthers.Secrets {
+ if val, ok := labelResource[label]; ok {
+ val.Others.Secrets = secrets
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ Secrets: secrets,
+ },
+ }
+ }
+ for label, serviceAccounts := range processOthers.ServiceAccounts {
+ if val, ok := labelResource[label]; ok {
+ val.Others.ServiceAccounts = serviceAccounts
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ ServiceAccounts: serviceAccounts,
+ },
+ }
+ }
+ for label, roleBindings := range processOthers.RoleBindings {
+ if val, ok := labelResource[label]; ok {
+ val.Others.RoleBindings = roleBindings
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ RoleBindings: roleBindings,
+ },
+ }
+ }
+ for label, horizontalPodAutoscalers := range processOthers.HorizontalPodAutoscalers {
+ if val, ok := labelResource[label]; ok {
+ val.Others.HorizontalPodAutoscalers = horizontalPodAutoscalers
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ HorizontalPodAutoscalers: horizontalPodAutoscalers,
+ },
+ }
+ }
+ for label, roles := range processOthers.Roles {
+ if val, ok := labelResource[label]; ok {
+ val.Others.Roles = roles
+ labelResource[label] = val
+ continue
+ }
+ labelResource[label] = model.LabelResource{
+ Others: model.OtherResource{
+ Roles: roles,
+ },
+ }
+ }
+ return labelResource
+}
+
+//Resource -
+type Resource struct {
+ ObjectMeta metav1.ObjectMeta
+ Template corev1.PodTemplateSpec
+}
+
+//getResourceName 将指定资源类型按照【label名】:[]{资源名...}处理后返回
+func (c *clusterAction) getResourceName(ctx context.Context, namespace string, content string, resourcesType string) (map[string][]string, map[string][]string, map[string][]string) {
+ resourceName := make(map[string][]string)
+ var tempResources []*Resource
+ isWorkloads := false
+ cmMap := make(map[string][]string)
+ secretMap := make(map[string][]string)
+ switch resourcesType {
+ case model.Deployment:
+ resources, err := c.clientset.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Deployment list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta, Template: dm.Spec.Template})
+ }
+ isWorkloads = true
+ case model.Job:
+ resources, err := c.clientset.BatchV1().Jobs(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Job list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ if len(dm.OwnerReferences) > 0 {
+ if dm.OwnerReferences[0].Kind == model.CronJob {
+ continue
+ }
+ }
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta, Template: dm.Spec.Template})
+ }
+ isWorkloads = true
+ case model.CronJob:
+ resources, err := c.clientset.BatchV1beta1().CronJobs(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get CronJob list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta, Template: dm.Spec.JobTemplate.Spec.Template})
+ }
+ isWorkloads = true
+ case model.StateFulSet:
+ resources, err := c.clientset.AppsV1().StatefulSets(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get StateFulSets list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta, Template: dm.Spec.Template})
+ }
+ isWorkloads = true
+ case model.Service:
+ resources, err := c.clientset.CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Services list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.PVC:
+ resources, err := c.clientset.CoreV1().PersistentVolumeClaims(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get PersistentVolumeClaims list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.Ingress:
+ resources, err := c.clientset.NetworkingV1().Ingresses(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Ingresses list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.NetworkPolicy:
+ resources, err := c.clientset.NetworkingV1().NetworkPolicies(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get NetworkPolicies list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.ConfigMap:
+ resources, err := c.clientset.CoreV1().ConfigMaps(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get ConfigMaps list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.Secret:
+ resources, err := c.clientset.CoreV1().Secrets(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Secrets list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.ServiceAccount:
+ resources, err := c.clientset.CoreV1().ServiceAccounts(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get ServiceAccounts list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.RoleBinding:
+ resources, err := c.clientset.RbacV1().RoleBindings(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get RoleBindings list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ case model.HorizontalPodAutoscaler:
+ resources, err := c.clientset.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get HorizontalPodAutoscalers list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, hpa := range resources.Items {
+ rbdResource := false
+ labels := make(map[string]string)
+ switch hpa.Spec.ScaleTargetRef.Kind {
+ case model.Deployment:
+ deploy, err := c.clientset.AppsV1().Deployments(namespace).Get(context.Background(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("The bound deployment does not exist:%v", err)
+ }
+ if hpa.ObjectMeta.Labels["creator"] == "Rainbond" {
+ rbdResource = true
+ }
+ labels = deploy.ObjectMeta.Labels
+ case model.StateFulSet:
+ ss, err := c.clientset.AppsV1().StatefulSets(namespace).Get(context.Background(), hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
+ if err != nil {
+ logrus.Errorf("The bound deployment does not exist:%v", err)
+ }
+ if hpa.ObjectMeta.Labels["creator"] == "Rainbond" {
+ rbdResource = true
+ }
+ labels = ss.ObjectMeta.Labels
+ }
+ var app string
+ if content == "unmanaged" && rbdResource {
+ continue
+ }
+ app = labels["app"]
+ if labels["app.kubernetes.io/name"] != "" {
+ app = labels["app.kubernetes.io/name"]
+ }
+ if app == "" {
+ app = "unclassified"
+ }
+ if _, ok := resourceName[app]; ok {
+ resourceName[app] = append(resourceName[app], hpa.Name)
+ } else {
+ resourceName[app] = []string{hpa.Name}
+ }
+ }
+ return resourceName, nil, nil
+ case model.Role:
+ resources, err := c.clientset.RbacV1().Roles(namespace).List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ logrus.Errorf("Failed to get Roles list:%v", err)
+ return nil, cmMap, secretMap
+ }
+ for _, dm := range resources.Items {
+ tempResources = append(tempResources, &Resource{ObjectMeta: dm.ObjectMeta})
+ }
+ }
+ //这一块是统一处理资源,按label划分出来
+ for _, rs := range tempResources {
+ if content == "unmanaged" && rs.ObjectMeta.Labels["creator"] == "Rainbond" {
+ continue
+ }
+ app := rs.ObjectMeta.Labels["app"]
+ if rs.ObjectMeta.Labels["app.kubernetes.io/name"] != "" {
+ app = rs.ObjectMeta.Labels["app.kubernetes.io/name"]
+ }
+ if app == "" {
+ app = "unclassified"
+ }
+ //如果是Workloads类型的资源需要检查其内部configmap、secret、PVC(防止没有这三种资源没有label但是用到了)
+ if isWorkloads {
+ cmList, secretList := c.replenishLabel(ctx, rs, namespace, app)
+ if _, ok := cmMap[app]; ok {
+ cmMap[app] = append(cmMap[app], cmList...)
+ } else {
+ cmMap[app] = cmList
+ }
+ if _, ok := secretMap[app]; ok {
+ secretMap[app] = append(secretMap[app], secretList...)
+ } else {
+ secretMap[app] = secretList
+ }
+ }
+ if _, ok := resourceName[app]; ok {
+ resourceName[app] = append(resourceName[app], rs.ObjectMeta.Name)
+ } else {
+ resourceName[app] = []string{rs.ObjectMeta.Name}
+ }
+ }
+ return resourceName, cmMap, secretMap
+}
+
+//replenishLabel collects the ConfigMaps and Secrets mounted by a workload's
+//volumes (so they can be imported together with the workload) and labels any
+//mounted PVC with the application name.
+//It returns the names of mounted ConfigMaps and Secrets that carry neither an
+//"app" nor an "app.kubernetes.io/name" label.
+func (c *clusterAction) replenishLabel(ctx context.Context, resource *Resource, namespace string, app string) ([]string, []string) {
+	var cmList []string
+	var secretList []string
+	resourceVolume := resource.Template.Spec.Volumes
+	for _, volume := range resourceVolume {
+		if pvc := volume.PersistentVolumeClaim; pvc != nil {
+			PersistentVolumeClaims, err := c.clientset.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvc.ClaimName, metav1.GetOptions{})
+			if err != nil {
+				logrus.Errorf("Failed to get PersistentVolumeClaims %s/%s:%v", namespace, pvc.ClaimName, err)
+				// Skip this volume: labelling a PVC we could not fetch would
+				// dereference a nil object.
+				continue
+			}
+			if PersistentVolumeClaims.Labels == nil {
+				PersistentVolumeClaims.Labels = make(map[string]string)
+			}
+			// Only stamp the app label when neither recognised label is set.
+			if _, ok := PersistentVolumeClaims.Labels["app"]; !ok {
+				if _, ok := PersistentVolumeClaims.Labels["app.kubernetes.io/name"]; !ok {
+					PersistentVolumeClaims.Labels["app"] = app
+				}
+			}
+			_, err = c.clientset.CoreV1().PersistentVolumeClaims(namespace).Update(ctx, PersistentVolumeClaims, metav1.UpdateOptions{})
+			if err != nil {
+				logrus.Errorf("PersistentVolumeClaims label update error:%v", err)
+			}
+			continue
+		}
+		if cm := volume.ConfigMap; cm != nil {
+			cm, err := c.clientset.CoreV1().ConfigMaps(namespace).Get(ctx, cm.Name, metav1.GetOptions{})
+			if err != nil {
+				logrus.Errorf("Failed to get ConfigMap:%v", err)
+				continue
+			}
+			if _, ok := cm.Labels["app"]; !ok {
+				if _, ok := cm.Labels["app.kubernetes.io/name"]; !ok {
+					cmList = append(cmList, cm.Name)
+				}
+			}
+		}
+		if secret := volume.Secret; secret != nil {
+			secret, err := c.clientset.CoreV1().Secrets(namespace).Get(ctx, secret.SecretName, metav1.GetOptions{})
+			if err != nil {
+				logrus.Errorf("Failed to get Secret:%v", err)
+				continue
+			}
+			if _, ok := secret.Labels["app"]; !ok {
+				if _, ok := secret.Labels["app.kubernetes.io/name"]; !ok {
+					// Fixed: the original appended secret names to cmList,
+					// so secretList was always returned empty.
+					secretList = append(secretList, secret.Name)
+				}
+			}
+		}
+	}
+	return cmList, secretList
+}
diff --git a/api/handler/handler.go b/api/handler/handler.go
index 8565cd515..ba15fe423 100644
--- a/api/handler/handler.go
+++ b/api/handler/handler.go
@@ -30,7 +30,9 @@ import (
etcdutil "github.com/goodrain/rainbond/util/etcd"
"github.com/goodrain/rainbond/worker/client"
"github.com/sirupsen/logrus"
+ "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -42,6 +44,8 @@ func InitHandle(conf option.Config,
kubeClient *kubernetes.Clientset,
rainbondClient versioned.Interface,
k8sClient k8sclient.Client,
+ config *rest.Config,
+ mapper meta.RESTMapper,
) error {
mq := api_db.MQManager{
EtcdClientArgs: etcdClientArgs,
@@ -80,7 +84,7 @@ func InitHandle(conf option.Config,
batchOperationHandler = CreateBatchOperationHandler(mqClient, statusCli, operationHandler)
defaultAppRestoreHandler = NewAppRestoreHandler()
defPodHandler = NewPodHandler(statusCli)
- defClusterHandler = NewClusterHandler(kubeClient, conf.RbdNamespace)
+ defClusterHandler = NewClusterHandler(kubeClient, conf.RbdNamespace, config, mapper)
defaultVolumeTypeHandler = CreateVolumeTypeManger(statusCli)
defaultEtcdHandler = NewEtcdHandler(etcdcli)
defaultmonitorHandler = NewMonitorHandler(prometheusCli)
diff --git a/api/handler/k8s_attribute.go b/api/handler/k8s_attribute.go
new file mode 100644
index 000000000..6b2541bb5
--- /dev/null
+++ b/api/handler/k8s_attribute.go
@@ -0,0 +1,44 @@
+// RAINBOND, Application Management Platform
+// Copyright (C) 2022-2022 Goodrain Co., Ltd.
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version. For any non-GPL usage of Rainbond,
+// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
+// must be obtained first.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+package handler
+
+import (
+ api_model "github.com/goodrain/rainbond/api/model"
+ "github.com/goodrain/rainbond/db"
+)
+
+// CreateK8sAttribute persists a new k8s attribute record for the given
+// tenant/component pair.
+func (s *ServiceAction) CreateK8sAttribute(tenantID, componentID string, k8sAttr *api_model.ComponentK8sAttribute) error {
+	record := k8sAttr.DbModel(tenantID, componentID)
+	return db.GetManager().ComponentK8sAttributeDao().AddModel(record)
+}
+
+// UpdateK8sAttribute looks up the stored attribute by component ID and name,
+// overwrites its value, and saves the record back.
+func (s *ServiceAction) UpdateK8sAttribute(componentID string, k8sAttributes *api_model.ComponentK8sAttribute) error {
+	dao := db.GetManager().ComponentK8sAttributeDao()
+	attr, err := dao.GetByComponentIDAndName(componentID, k8sAttributes.Name)
+	if err != nil {
+		return err
+	}
+	attr.AttributeValue = k8sAttributes.AttributeValue
+	return dao.UpdateModel(attr)
+}
+
+// DeleteK8sAttribute removes the attribute identified by component ID and name.
+func (s *ServiceAction) DeleteK8sAttribute(componentID, name string) error {
+	dao := db.GetManager().ComponentK8sAttributeDao()
+	return dao.DeleteByComponentIDAndName(componentID, name)
+}
diff --git a/api/handler/resource.go b/api/handler/resource.go
new file mode 100644
index 000000000..27210cf72
--- /dev/null
+++ b/api/handler/resource.go
@@ -0,0 +1,249 @@
+package handler
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/goodrain/rainbond/api/model"
+	"github.com/goodrain/rainbond/api/util"
+	"github.com/goodrain/rainbond/db"
+	dbmodel "github.com/goodrain/rainbond/db/model"
+	"github.com/sirupsen/logrus"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/yaml"
+	yamlt "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/client-go/dynamic"
+)
+
+//AddAppK8SResource applies the given yaml to the cluster and records every
+//resulting resource under the application — including objects that failed to
+//create, so the failure stays visible and editable in the console.
+func (c *clusterAction) AddAppK8SResource(ctx context.Context, namespace string, appID string, resourceYaml string) ([]*dbmodel.K8sResource, *util.APIHandleError) {
+	logrus.Info("begin AddAppK8SResource")
+	resourceObjects := c.HandleResourceYaml([]byte(resourceYaml), namespace, "create", "")
+	var resourceList []*dbmodel.K8sResource
+	for _, resourceObject := range resourceObjects {
+		resource := resourceObject
+		if resourceObject.State == model.CreateError {
+			// Creation failed; keep whatever yaml we managed to parse.
+			rsYaml := resourceYaml
+			if resourceObject.Resource != nil {
+				rsYaml, _ = ObjectToJSONORYaml("yaml", resourceObject.Resource)
+			}
+			resourceList = append(resourceList, &dbmodel.K8sResource{
+				AppID:         appID,
+				Name:          "未识别",
+				Kind:          "未识别",
+				Content:       rsYaml,
+				ErrorOverview: resource.ErrorOverview,
+				State:         resource.State,
+			})
+			continue
+		}
+		rsYaml, _ := ObjectToJSONORYaml("yaml", resourceObject.Resource)
+		resourceList = append(resourceList, &dbmodel.K8sResource{
+			AppID:         appID,
+			Name:          resource.Resource.GetName(),
+			Kind:          resource.Resource.GetKind(),
+			Content:       rsYaml,
+			ErrorOverview: resource.ErrorOverview,
+			State:         resource.State,
+		})
+	}
+	// Persist all records in one batch. The original code called the batch
+	// insert inside the loop on the growing list, writing duplicate rows and
+	// never persisting anything when every object failed to create.
+	if err := db.GetManager().K8sResourceDao().CreateK8sResourceInBatch(resourceList); err != nil {
+		return nil, &util.APIHandleError{Code: 400, Err: fmt.Errorf("CreateK8sResource %v", err)}
+	}
+	return resourceList, nil
+}
+
+//UpdateAppK8SResource -
+func (c *clusterAction) UpdateAppK8SResource(ctx context.Context, namespace, appID, name, resourceYaml, kind string) (dbmodel.K8sResource, *util.APIHandleError) {
+ logrus.Info("begin UpdateAppK8SResource")
+ rs, err := db.GetManager().K8sResourceDao().GetK8sResourceByNameInBatch(appID, name, kind)
+ if err != nil {
+ return dbmodel.K8sResource{}, &util.APIHandleError{Code: 400, Err: fmt.Errorf("get k8s resource %v", err)}
+ }
+ resourceObjects := c.HandleResourceYaml([]byte(resourceYaml), namespace, "update", name)
+ var rsYaml string
+ if resourceObjects[0].State == 4 {
+ rsYaml = resourceYaml
+ rs[0].State = resourceObjects[0].State
+ rs[0].ErrorOverview = resourceObjects[0].ErrorOverview
+ rs[0].Content = rsYaml
+ db.GetManager().K8sResourceDao().UpdateModel(&rs[0])
+ } else {
+ rsYaml, _ = ObjectToJSONORYaml("yaml", resourceObjects[0].Resource)
+ rs[0].State = resourceObjects[0].State
+ rs[0].ErrorOverview = resourceObjects[0].ErrorOverview
+ rs[0].Content = rsYaml
+ db.GetManager().K8sResourceDao().UpdateModel(&rs[0])
+ }
+ return rs[0], nil
+}
+
+//DeleteAppK8SResource deletes the object from the cluster (best effort — any
+//cluster-side failure is only logged inside HandleResourceYaml) and then
+//removes its record from the database.
+func (c *clusterAction) DeleteAppK8SResource(ctx context.Context, namespace, appID, name, resourceYaml, kind string) *util.APIHandleError {
+	logrus.Info("begin DeleteAppK8SResource")
+	c.HandleResourceYaml([]byte(resourceYaml), namespace, "delete", name)
+	if err := db.GetManager().K8sResourceDao().DeleteK8sResourceInBatch(appID, name, kind); err != nil {
+		return &util.APIHandleError{Code: 400, Err: fmt.Errorf("DeleteAppK8SResource %v", err)}
+	}
+	return nil
+}
+
+// SyncAppK8SResources re-creates every requested resource in the cluster and
+// records the outcome in one batch. Entries whose yaml expands to more than
+// one object are skipped with a warning; empty expansions are ignored.
+func (c *clusterAction) SyncAppK8SResources(ctx context.Context, req *model.SyncResources) ([]*dbmodel.K8sResource, *util.APIHandleError) {
+	logrus.Info("begin SyncAppK8SResource")
+	var resourceList []*dbmodel.K8sResource
+	for _, k8sResource := range req.K8sResources {
+		resourceObjects := c.HandleResourceYaml([]byte(k8sResource.ResourceYaml), k8sResource.Namespace, "re-create", k8sResource.Name)
+		switch {
+		case len(resourceObjects) > 1:
+			logrus.Warningf("SyncAppK8SResources resourceObjects [%s] too much, ignore it", k8sResource.Name)
+		case len(resourceObjects) == 1:
+			resourceList = append(resourceList, &dbmodel.K8sResource{
+				AppID:         k8sResource.AppID,
+				Name:          k8sResource.Name,
+				Kind:          k8sResource.Kind,
+				Content:       k8sResource.ResourceYaml,
+				ErrorOverview: resourceObjects[0].ErrorOverview,
+				State:         resourceObjects[0].State,
+			})
+		}
+	}
+	if err := db.GetManager().K8sResourceDao().CreateK8sResourceInBatch(resourceList); err != nil {
+		return nil, &util.APIHandleError{Code: 400, Err: fmt.Errorf("SyncK8sResource %v", err)}
+	}
+	return resourceList, nil
+}
+
+//HandleResourceYaml parses a (possibly multi-document) yaml/json payload and
+//performs the requested change — "create", "re-create", "update" or "delete"
+//(the latter by name) — against the cluster through the dynamic client.
+//Each parsed object is returned as a BuildResource carrying its final state;
+//parse/mapping failures yield a single error BuildResource.
+func (c *clusterAction) HandleResourceYaml(resourceYaml []byte, namespace string, change string, name string) []*model.BuildResource {
+	var state int
+	if change == "create" || change == "re-create" {
+		state = model.CreateError
+	} else if change == "update" {
+		state = model.UpdateError
+	}
+	// fail wraps any pre-apply error into a single error BuildResource,
+	// replacing the five copies of this boilerplate in the original.
+	fail := func(err error) []*model.BuildResource {
+		logrus.Errorf("%v", err)
+		return []*model.BuildResource{{
+			State:         state,
+			ErrorOverview: err.Error(),
+		}}
+	}
+	dc, err := dynamic.NewForConfig(c.config)
+	if err != nil {
+		return fail(err)
+	}
+	// (The original re-checked err here after an identity copy of the yaml —
+	// dead code, removed.)
+	var buildResourceList []*model.BuildResource
+	decoder := yamlt.NewYAMLOrJSONDecoder(bytes.NewReader(resourceYaml), 1000)
+	for {
+		var rawObj runtime.RawExtension
+		if err = decoder.Decode(&rawObj); err != nil {
+			// The decoder signals end of stream with io.EOF; compare the
+			// error, not its string rendering.
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return fail(err)
+		}
+		obj, gvk, err := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme).Decode(rawObj.Raw, nil, nil)
+		if err != nil {
+			return fail(err)
+		}
+		// Convert the runtime object into an unstructured map/object.
+		unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+		if err != nil {
+			return fail(err)
+		}
+		unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap}
+		mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+		if err != nil {
+			return fail(err)
+		}
+		var dri dynamic.ResourceInterface
+		if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
+			// Namespaced resources are forced into the tenant namespace.
+			unstructuredObj.SetNamespace(namespace)
+			dri = dc.Resource(mapping.Resource).Namespace(unstructuredObj.GetNamespace())
+		} else {
+			dri = dc.Resource(mapping.Resource)
+		}
+		buildResourceList = append(buildResourceList, &model.BuildResource{
+			Resource: unstructuredObj,
+			Dri:      dri,
+		})
+	}
+	for _, buildResource := range buildResourceList {
+		unstructuredObj := buildResource.Resource
+		switch change {
+		case "re-create":
+			// Strip server-populated fields so the object can be created anew.
+			unstructuredObj.SetResourceVersion("")
+			unstructuredObj.SetCreationTimestamp(metav1.Time{})
+			unstructuredObj.SetUID("")
+			fallthrough
+		case "create":
+			obj, err := buildResource.Dri.Create(context.TODO(), unstructuredObj, metav1.CreateOptions{})
+			if err != nil {
+				logrus.Errorf("k8s resource create error %v", err)
+				buildResource.Resource = unstructuredObj
+				buildResource.State = state
+				buildResource.ErrorOverview = err.Error()
+			} else {
+				buildResource.Resource = obj
+				buildResource.State = model.CreateSuccess
+				buildResource.ErrorOverview = "创建成功"
+			}
+		case "delete":
+			if err := buildResource.Dri.Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
+				logrus.Errorf("delete k8s resource error %v", err)
+			}
+		case "update":
+			obj, err := buildResource.Dri.Update(context.TODO(), unstructuredObj, metav1.UpdateOptions{})
+			if err != nil {
+				logrus.Errorf("update k8s resource error %v", err)
+				buildResource.Resource = unstructuredObj
+				buildResource.State = state
+				buildResource.ErrorOverview = err.Error()
+			} else {
+				buildResource.Resource = obj
+				buildResource.State = model.UpdateSuccess
+				buildResource.ErrorOverview = "更新成功"
+			}
+		}
+	}
+	return buildResourceList
+}
diff --git a/api/handler/resource_import.go b/api/handler/resource_import.go
new file mode 100644
index 000000000..4f156d0de
--- /dev/null
+++ b/api/handler/resource_import.go
@@ -0,0 +1,428 @@
+package handler
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/goodrain/rainbond/api/model"
+ "github.com/goodrain/rainbond/api/util"
+ "github.com/goodrain/rainbond/db"
+ dbmodel "github.com/goodrain/rainbond/db/model"
+ rainbondutil "github.com/goodrain/rainbond/util"
+ "github.com/goodrain/rainbond/util/constants"
+ "github.com/jinzhu/gorm"
+ "github.com/sirupsen/logrus"
+ "github.com/twinj/uuid"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "strings"
+ "time"
+)
+
+//ResourceImport Import the converted k8s resources into recognition.
+//The whole import runs inside one DB transaction: it creates (or reuses) the
+//tenant for the namespace, then for every app creates the application record,
+//its attached plain k8s resources, and one component per converted workload
+//together with its env/config/port/autoscaler/probe/attribute records.
+//The assembled result is returned for the caller to display.
+func (c *clusterAction) ResourceImport(namespace string, as map[string]model.ApplicationResource, eid string) (*model.ReturnResourceImport, *util.APIHandleError) {
+	logrus.Infof("ResourceImport function begin")
+	var returnResourceImport model.ReturnResourceImport
+	err := db.GetManager().DB().Transaction(func(tx *gorm.DB) error {
+		tenant, err := c.createTenant(eid, namespace, tx)
+		// Tenant is recorded before the error check; on failure it may be nil.
+		returnResourceImport.Tenant = tenant
+		if err != nil {
+			logrus.Errorf("%v", err)
+			return &util.APIHandleError{Code: 400, Err: fmt.Errorf("create tenant error:%v", err)}
+		}
+		// as maps app name -> resources that were converted for that app.
+		for appName, components := range as {
+			app, err := c.createApp(eid, tx, appName, tenant.UUID)
+			if err != nil {
+				logrus.Errorf("create app:%v err:%v", appName, err)
+				return &util.APIHandleError{Code: 400, Err: fmt.Errorf("create app:%v error:%v", appName, err)}
+			}
+			k8sResource, err := c.CreateK8sResource(tx, components.KubernetesResources, app.AppID)
+			if err != nil {
+				logrus.Errorf("create K8sResources err:%v", err)
+				return &util.APIHandleError{Code: 400, Err: fmt.Errorf("create K8sResources err:%v", err)}
+			}
+			var componentAttributes []model.ComponentAttributes
+			for _, componentResource := range components.ConvertResource {
+				// isYaml=false: the workload already exists in the cluster and
+				// gets re-labelled as Rainbond-managed by CreateComponent.
+				component, err := c.CreateComponent(app, tenant.UUID, componentResource, namespace, false)
+				if err != nil {
+					logrus.Errorf("%v", err)
+					return &util.APIHandleError{Code: 400, Err: fmt.Errorf("create app error:%v", err)}
+				}
+				// Attach all converted sub-records to the new component.
+				c.createENV(componentResource.ENVManagement, component)
+				c.createConfig(componentResource.ConfigManagement, component)
+				c.createPort(componentResource.PortManagement, component)
+				componentResource.TelescopicManagement.RuleID = c.createTelescopic(componentResource.TelescopicManagement, component)
+				componentResource.HealthyCheckManagement.ProbeID = c.createHealthyCheck(componentResource.HealthyCheckManagement, component)
+				c.createK8sAttributes(componentResource.ComponentK8sAttributesManagement, tenant.UUID, component)
+				componentAttributes = append(componentAttributes, model.ComponentAttributes{
+					TS:                     component,
+					Image:                  componentResource.BasicManagement.Image,
+					Cmd:                    componentResource.BasicManagement.Cmd,
+					ENV:                    componentResource.ENVManagement,
+					Config:                 componentResource.ConfigManagement,
+					Port:                   componentResource.PortManagement,
+					Telescopic:             componentResource.TelescopicManagement,
+					HealthyCheck:           componentResource.HealthyCheckManagement,
+					ComponentK8sAttributes: componentResource.ComponentK8sAttributesManagement,
+				})
+			}
+			application := model.AppComponent{
+				App:          app,
+				Component:    componentAttributes,
+				K8sResources: k8sResource,
+			}
+			returnResourceImport.App = append(returnResourceImport.App, application)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, &util.APIHandleError{Code: 400, Err: fmt.Errorf("resource import error:%v", err)}
+	}
+	logrus.Infof("ResourceImport function end")
+	return &returnResourceImport, nil
+}
+
+// createTenant returns the tenant that will own the imported namespace,
+// reusing an existing tenant with the same generated name when present,
+// and labels the namespace as managed by Rainbond.
+func (c *clusterAction) createTenant(eid string, namespace string, tx *gorm.DB) (*dbmodel.Tenants, error) {
+	logrus.Infof("begin create tenant")
+	var dbts dbmodel.Tenants
+	id, name, errN := GetServiceManager().CreateTenandIDAndName(eid)
+	if errN != nil {
+		return nil, errN
+	}
+	dbts.EID = eid
+	dbts.Namespace = namespace
+	dbts.Name = name
+	dbts.UUID = id
+	dbts.LimitMemory = 0
+	// If a tenant with this name already exists, reuse it instead of
+	// inserting a duplicate.
+	tenant, _ := db.GetManager().TenantDao().GetTenantIDByName(dbts.Name)
+	if tenant != nil {
+		logrus.Warningf("tenant %v already exists", dbts.Name)
+		return tenant, nil
+	}
+	if err := db.GetManager().TenantDaoTransactions(tx).AddModel(&dbts); err != nil {
+		// An "is exist" suffix means a concurrent insert won the race;
+		// treat it as success.  Any other error aborts.
+		if !strings.HasSuffix(err.Error(), "is exist") {
+			return nil, err
+		}
+	}
+	// Mark the namespace itself as Rainbond-managed.
+	ns, err := c.clientset.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
+	if err != nil {
+		return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to get namespace %v:%v", namespace, err)}
+	}
+	if ns.Labels == nil {
+		ns.Labels = make(map[string]string)
+	}
+	ns.Labels[constants.ResourceManagedByLabel] = constants.Rainbond
+	_, err = c.clientset.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{})
+	if err != nil {
+		return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to add label to namespace %v:%v", namespace, err)}
+	}
+	logrus.Infof("end create tenant")
+	return &dbts, nil
+}
+
+// createApp returns the existing application with the given name under the
+// tenant, or creates a new one (type "rainbond", kubernetes-native
+// governance) inside the transaction.
+func (c *clusterAction) createApp(eid string, tx *gorm.DB, app string, tenantID string) (*dbmodel.Application, error) {
+	if existing, _ := db.GetManager().ApplicationDaoTransactions(tx).GetAppByName(tenantID, app); existing != nil {
+		logrus.Infof("app %v already exists", app)
+		return existing, nil
+	}
+	appReq := &dbmodel.Application{
+		EID:            eid,
+		TenantID:       tenantID,
+		AppID:          rainbondutil.NewUUID(),
+		AppName:        app,
+		AppType:        "rainbond",
+		GovernanceMode: dbmodel.GovernanceModeKubernetesNativeService,
+		K8sApp:         app,
+	}
+	err := db.GetManager().ApplicationDaoTransactions(tx).AddModel(appReq)
+	return appReq, err
+}
+
+// CreateK8sResource stamps every resource with the application ID and stores
+// them in one batch inside the transaction.
+// The AppID is set on the slice elements themselves so the returned slice
+// reflects what was written — the original version only set it on range-loop
+// copies, returning records with an empty AppID.
+func (c *clusterAction) CreateK8sResource(tx *gorm.DB, k8sResources []dbmodel.K8sResource, AppID string) ([]dbmodel.K8sResource, error) {
+	var k8sResourceList []*dbmodel.K8sResource
+	for i := range k8sResources {
+		k8sResources[i].AppID = AppID
+		k8sResourceList = append(k8sResourceList, &k8sResources[i])
+	}
+	err := db.GetManager().K8sResourceDaoTransactions(tx).CreateK8sResourceInBatch(k8sResourceList)
+	return k8sResources, err
+}
+
+// CreateComponent creates the TenantServices record for a converted workload
+// and, unless the component came from plain yaml (isYaml), re-labels the live
+// workload in the cluster so Rainbond recognises it as managed.
+// The extend method is derived from the workload kind; replicas default to 1.
+func (c *clusterAction) CreateComponent(app *dbmodel.Application, tenantID string, component model.ConvertResource, namespace string, isYaml bool) (*dbmodel.TenantServices, error) {
+	var extendMethod string
+	switch component.BasicManagement.ResourceType {
+	case model.Deployment:
+		extendMethod = string(dbmodel.ServiceTypeStatelessMultiple)
+	case model.Job:
+		extendMethod = string(dbmodel.ServiceTypeJob)
+	case model.CronJob:
+		extendMethod = string(dbmodel.ServiceTypeCronJob)
+	case model.StateFulSet:
+		extendMethod = string(dbmodel.ServiceTypeStateMultiple)
+	}
+	serviceID := rainbondutil.NewUUID()
+	serviceAlias := "gr" + serviceID[len(serviceID)-6:]
+	replicas := 1
+	if component.BasicManagement.Replicas != nil {
+		replicas = int(*component.BasicManagement.Replicas)
+	}
+	jobStrategy, err := json.Marshal(component.BasicManagement.JobStrategy)
+	if err != nil {
+		logrus.Errorf("component %v BasicManagement.JobStrategy json error%v", component.ComponentsName, err)
+	}
+	ts := dbmodel.TenantServices{
+		TenantID:         tenantID,
+		ServiceID:        serviceID,
+		ServiceAlias:     serviceAlias,
+		ServiceName:      serviceAlias,
+		ServiceType:      "application",
+		Comment:          "docker run application",
+		ContainerCPU:     int(component.BasicManagement.CPU),
+		ContainerMemory:  int(component.BasicManagement.Memory),
+		ContainerGPU:     0,
+		UpgradeMethod:    "Rolling",
+		ExtendMethod:     extendMethod,
+		Replicas:         replicas,
+		DeployVersion:    time.Now().Format("20060102150405"),
+		Category:         "app_publish",
+		CurStatus:        "undeploy",
+		Status:           0,
+		Namespace:        namespace,
+		UpdateTime:       time.Now(),
+		Kind:             "internal",
+		AppID:            app.AppID,
+		K8sComponentName: component.ComponentsName,
+		JobStrategy:      string(jobStrategy),
+	}
+	if err := db.GetManager().TenantServiceDao().AddModel(&ts); err != nil {
+		logrus.Errorf("add service error, %v", err)
+		return nil, err
+	}
+	if !isYaml {
+		// changeLabel stamps the labels Rainbond uses to recognise and track
+		// a managed workload.
+		changeLabel := func(label map[string]string) map[string]string {
+			label[constants.ResourceManagedByLabel] = constants.Rainbond
+			label["service_id"] = serviceID
+			label["version"] = ts.DeployVersion
+			label["creater_id"] = string(rainbondutil.NewTimeVersion())
+			label["migrator"] = "rainbond"
+			label["creator"] = "Rainbond"
+			return label
+		}
+		switch component.BasicManagement.ResourceType {
+		case model.Deployment:
+			dm, err := c.clientset.AppsV1().Deployments(namespace).Get(context.Background(), component.ComponentsName, metav1.GetOptions{})
+			if err != nil {
+				logrus.Errorf("failed to get %v Deployments %v:%v", namespace, component.ComponentsName, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to get Deployments %v:%v", namespace, err)}
+			}
+			if dm.Labels == nil {
+				dm.Labels = make(map[string]string)
+			}
+			if dm.Spec.Template.Labels == nil {
+				dm.Spec.Template.Labels = make(map[string]string)
+			}
+			dm.Labels = changeLabel(dm.Labels)
+			dm.Spec.Template.Labels = changeLabel(dm.Spec.Template.Labels)
+			_, err = c.clientset.AppsV1().Deployments(namespace).Update(context.Background(), dm, metav1.UpdateOptions{})
+			if err != nil {
+				logrus.Errorf("failed to update Deployments %v:%v", namespace, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to update Deployments %v:%v", namespace, err)}
+			}
+		case model.Job:
+			job, err := c.clientset.BatchV1().Jobs(namespace).Get(context.Background(), component.ComponentsName, metav1.GetOptions{})
+			if err != nil {
+				logrus.Errorf("failed to get %v Jobs %v:%v", namespace, component.ComponentsName, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to get Jobs %v:%v", namespace, err)}
+			}
+			if job.Labels == nil {
+				job.Labels = make(map[string]string)
+			}
+			// NOTE(review): unlike Deployment/StatefulSet, the Job's pod
+			// template labels are not updated — confirm this is intentional.
+			job.Labels = changeLabel(job.Labels)
+			_, err = c.clientset.BatchV1().Jobs(namespace).Update(context.Background(), job, metav1.UpdateOptions{})
+			if err != nil {
+				// Fixed: the original messages here said "StatefulSets".
+				logrus.Errorf("failed to update Jobs %v:%v", namespace, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to update Jobs %v:%v", namespace, err)}
+			}
+		case model.CronJob:
+			cr, err := c.clientset.BatchV1beta1().CronJobs(namespace).Get(context.Background(), component.ComponentsName, metav1.GetOptions{})
+			if err != nil {
+				logrus.Errorf("failed to get %v CronJob %v:%v", namespace, component.ComponentsName, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to get CronJob %v:%v", namespace, err)}
+			}
+			if cr.Labels == nil {
+				cr.Labels = make(map[string]string)
+			}
+			cr.Labels = changeLabel(cr.Labels)
+			if cr.Spec.JobTemplate.Labels == nil {
+				cr.Spec.JobTemplate.Labels = make(map[string]string)
+			}
+			cr.Spec.JobTemplate.Labels = changeLabel(cr.Spec.JobTemplate.Labels)
+			_, err = c.clientset.BatchV1beta1().CronJobs(namespace).Update(context.Background(), cr, metav1.UpdateOptions{})
+			if err != nil {
+				logrus.Errorf("failed to update CronJobs %v:%v", namespace, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to update CronJobs %v:%v", namespace, err)}
+			}
+		case model.StateFulSet:
+			sfs, err := c.clientset.AppsV1().StatefulSets(namespace).Get(context.Background(), component.ComponentsName, metav1.GetOptions{})
+			if err != nil {
+				logrus.Errorf("failed to get %v StatefulSets %v:%v", namespace, component.ComponentsName, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to get StatefulSets %v:%v", namespace, err)}
+			}
+			if sfs.Labels == nil {
+				sfs.Labels = make(map[string]string)
+			}
+			sfs.Labels = changeLabel(sfs.Labels)
+			if sfs.Spec.Template.Labels == nil {
+				sfs.Spec.Template.Labels = make(map[string]string)
+			}
+			sfs.Spec.Template.Labels = changeLabel(sfs.Spec.Template.Labels)
+			_, err = c.clientset.AppsV1().StatefulSets(namespace).Update(context.Background(), sfs, metav1.UpdateOptions{})
+			if err != nil {
+				logrus.Errorf("failed to update StatefulSets %v:%v", namespace, err)
+				return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("failed to update StatefulSets %v:%v", namespace, err)}
+			}
+		}
+	}
+	return &ts, nil
+}
+
+// createENV stores the component's environment variables (scope "inner") in
+// one batch; failures are logged but do not abort the import.
+func (c *clusterAction) createENV(envs []model.ENVManagement, service *dbmodel.TenantServices) {
+	var envVar []*dbmodel.TenantServiceEnvVar
+	for _, env := range envs {
+		envVar = append(envVar, &dbmodel.TenantServiceEnvVar{
+			TenantID:      service.TenantID,
+			ServiceID:     service.ServiceID,
+			Name:          env.ENVExplain,
+			AttrName:      env.ENVKey,
+			AttrValue:     env.ENVValue,
+			ContainerPort: 0,
+			IsChange:      true,
+			Scope:         "inner",
+		})
+	}
+	if err := db.GetManager().TenantServiceEnvVarDao().CreateOrUpdateEnvsInBatch(envVar); err != nil {
+		logrus.Errorf("%v Environment variable creation failed:%v", service.ServiceAlias, err)
+	}
+}
+
+// createConfig stores the component's config-file volumes in one batch;
+// failures are logged but do not abort the import.
+func (c *clusterAction) createConfig(configs []model.ConfigManagement, service *dbmodel.TenantServices) {
+	var configVar []*dbmodel.TenantServiceVolume
+	for _, config := range configs {
+		// Copy Mode before taking its address: &config.Mode points at the
+		// shared loop variable (pre-Go 1.22), so every volume would otherwise
+		// end up referencing the last config's file mode.
+		mode := config.Mode
+		tsv := &dbmodel.TenantServiceVolume{
+			ServiceID:          service.ServiceID,
+			VolumeName:         config.ConfigName,
+			VolumePath:         config.ConfigPath,
+			VolumeType:         "config-file",
+			Category:           "",
+			VolumeProviderName: "",
+			IsReadOnly:         false,
+			VolumeCapacity:     0,
+			AccessMode:         "RWX",
+			SharePolicy:        "exclusive",
+			BackupPolicy:       "exclusive",
+			ReclaimPolicy:      "exclusive",
+			AllowExpansion:     false,
+			Mode:               &mode,
+		}
+		configVar = append(configVar, tsv)
+	}
+	err := db.GetManager().TenantServiceVolumeDao().CreateOrUpdateVolumesInBatch(configVar)
+	if err != nil {
+		logrus.Errorf("%v configuration file creation failed:%v", service.ServiceAlias, err)
+	}
+}
+
+// createPort stores the component's ports in one batch; alias and k8s service
+// name are derived from the service alias and port number.  Failures are
+// logged but do not abort the import.
+func (c *clusterAction) createPort(ports []model.PortManagement, service *dbmodel.TenantServices) {
+	// Loop-invariant: the alias base does not depend on the port.
+	portAlias := strings.Replace(service.ServiceAlias, "-", "_", -1)
+	var portVar []*dbmodel.TenantServicesPort
+	for _, port := range ports {
+		// Copy Inner/Outer before taking addresses: &port.Inner points at the
+		// shared loop variable (pre-Go 1.22), so every record would otherwise
+		// share the last port's flags.
+		inner := port.Inner
+		outer := port.Outer
+		portVar = append(portVar, &dbmodel.TenantServicesPort{
+			TenantID:       service.TenantID,
+			ServiceID:      service.ServiceID,
+			ContainerPort:  int(port.Port),
+			MappingPort:    int(port.Port),
+			Protocol:       port.Protocol,
+			PortAlias:      fmt.Sprintf("%v%v", strings.ToUpper(portAlias), port.Port),
+			K8sServiceName: fmt.Sprintf("%v-%v", service.ServiceAlias, port.Port),
+			IsInnerService: &inner,
+			IsOuterService: &outer,
+		})
+	}
+	if err := db.GetManager().TenantServicesPortDao().CreateOrUpdatePortsInBatch(portVar); err != nil {
+		logrus.Errorf("%v port creation failed:%v", service.ServiceAlias, err)
+	}
+}
+
+// createTelescopic stores the component's autoscaler rule and its metrics,
+// returning the generated rule ID ("" when autoscaling is disabled or the
+// rule could not be stored).
+func (c *clusterAction) createTelescopic(telescopic model.TelescopicManagement, service *dbmodel.TenantServices) string {
+	if !telescopic.Enable {
+		return ""
+	}
+	r := &dbmodel.TenantServiceAutoscalerRules{
+		RuleID:      rainbondutil.NewUUID(),
+		ServiceID:   service.ServiceID,
+		Enable:      true,
+		XPAType:     "hpa",
+		MinReplicas: int(telescopic.MinReplicas),
+		MaxReplicas: int(telescopic.MaxReplicas),
+	}
+	// (The original also assigned r.RuleID back to the by-value parameter —
+	// a dead store invisible to callers, removed.)
+	if err := db.GetManager().TenantServceAutoscalerRulesDao().AddModel(r); err != nil {
+		logrus.Errorf("%v TenantServiceAutoscalerRules creation failed:%v", service.ServiceAlias, err)
+		return ""
+	}
+	for _, metric := range telescopic.CPUOrMemory {
+		m := &dbmodel.TenantServiceAutoscalerRuleMetrics{
+			RuleID:            r.RuleID,
+			MetricsType:       metric.MetricsType,
+			MetricsName:       metric.MetricsName,
+			MetricTargetType:  metric.MetricTargetType,
+			MetricTargetValue: metric.MetricTargetValue,
+		}
+		// Metric failures are logged but do not invalidate the rule itself.
+		if err := db.GetManager().TenantServceAutoscalerRuleMetricsDao().AddModel(m); err != nil {
+			logrus.Errorf("%v TenantServceAutoscalerRuleMetricsDao creation failed:%v", service.ServiceAlias, err)
+		}
+	}
+	return r.RuleID
+}
+
+func (c *clusterAction) createHealthyCheck(telescopic model.HealthyCheckManagement, service *dbmodel.TenantServices) string {
+ if telescopic.Status == 0 {
+ return ""
+ }
+ var tspD dbmodel.TenantServiceProbe
+ tspD.ServiceID = service.ServiceID
+ tspD.Cmd = telescopic.Command
+ tspD.FailureThreshold = telescopic.FailureThreshold
+ tspD.HTTPHeader = telescopic.HTTPHeader
+ tspD.InitialDelaySecond = telescopic.InitialDelaySecond
+ tspD.IsUsed = &telescopic.Status
+ tspD.Mode = telescopic.Mode
+ tspD.Path = telescopic.Path
+ tspD.PeriodSecond = telescopic.PeriodSecond
+ tspD.Port = telescopic.Port
+ tspD.ProbeID = strings.Replace(uuid.NewV4().String(), "-", "", -1)
+ tspD.Scheme = telescopic.DetectionMethod
+ tspD.SuccessThreshold = telescopic.SuccessThreshold
+ tspD.TimeoutSecond = telescopic.TimeoutSecond
+ tspD.FailureAction = ""
+ if err := GetServiceManager().ServiceProbe(&tspD, "add"); err != nil {
+ logrus.Errorf("%v createHealthyCheck creation failed:%v", service.ServiceAlias, err)
+ }
+ return tspD.ProbeID
+}
+
+// createK8sAttributes binds each attribute to the tenant/component and stores
+// them all in one batch; failures are logged but do not abort the import.
+func (c *clusterAction) createK8sAttributes(specials []*dbmodel.ComponentK8sAttributes, tenantID string, component *dbmodel.TenantServices) {
+	for _, attr := range specials {
+		attr.TenantID = tenantID
+		attr.ComponentID = component.ServiceID
+	}
+	if err := db.GetManager().ComponentK8sAttributeDao().CreateOrUpdateAttributesInBatch(specials); err != nil {
+		logrus.Errorf("%v createSpecial creation failed:%v", component.ServiceAlias, err)
+	}
+}
diff --git a/api/handler/resource_public_function.go b/api/handler/resource_public_function.go
new file mode 100644
index 000000000..47918e089
--- /dev/null
+++ b/api/handler/resource_public_function.go
@@ -0,0 +1,417 @@
+package handler
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/goodrain/rainbond/api/model"
+ dbmodel "github.com/goodrain/rainbond/db/model"
+ "github.com/sirupsen/logrus"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "path"
+ "sigs.k8s.io/yaml"
+ "strconv"
+ "strings"
+)
+
+//PodTemplateSpecResource converts a kubernetes PodTemplateSpec (plus the
+//ConfigMaps and HPAs shipped with the same yaml) into a Rainbond
+//ConvertResource and appends it to parameter.ComponentsCR. Only the first
+//container of the template is inspected.
+func (c *clusterAction) PodTemplateSpecResource(parameter model.YamlResourceParameter) {
+	//Mutations through this pointer hit the same backing array the caller
+	//sees, exactly as the original's repeated Containers[0] writes did.
+	container := &parameter.Template.Spec.Containers[0]
+
+	//Port: UDP and TCP are recognized; any other protocol is logged and then
+	//treated as TCP so the port is not dropped. (The original logged the
+	//"not recognized" warning on every port, recognized or not.)
+	var ps []model.PortManagement
+	for _, port := range container.Ports {
+		switch port.Protocol {
+		case corev1.ProtocolUDP:
+			ps = append(ps, model.PortManagement{
+				Port:     port.ContainerPort,
+				Protocol: "UDP",
+				Inner:    false,
+				Outer:    false,
+			})
+		case corev1.ProtocolTCP:
+			ps = append(ps, model.PortManagement{
+				Port:     port.ContainerPort,
+				Protocol: "TCP",
+				Inner:    false,
+				Outer:    false,
+			})
+		default:
+			logrus.Warningf("Transport protocol type not recognized%v", port.Protocol)
+			ps = append(ps, model.PortManagement{
+				Port:     port.ContainerPort,
+				Protocol: "TCP",
+				Inner:    false,
+				Outer:    false,
+			})
+		}
+	}
+
+	//ENV: plain name/value variables are managed by Rainbond and stripped
+	//from the template; ValueFrom references are kept so they end up in the
+	//ENV attribute YAML below. (The original deleted in place inside an
+	//increasing index loop, skipping the element after every removal.)
+	var envs []model.ENVManagement
+	var keptEnvs []corev1.EnvVar
+	for _, env := range container.Env {
+		if env.ValueFrom == nil {
+			envs = append(envs, model.ENVManagement{
+				ENVKey:     env.Name,
+				ENVValue:   env.Value,
+				ENVExplain: "",
+			})
+			continue
+		}
+		keptEnvs = append(keptEnvs, env)
+	}
+	container.Env = keptEnvs
+
+	//Configs: configuration files mounted from ConfigMaps. The final file
+	//name is always a key of the ConfigMap. Four mount layouts exist:
+	// 1. volume has items, volumeMount has a SubPath: the file path is the
+	//    mountPath itself.
+	// 2. volume has items, no SubPath: one file per item at mountPath/<item.Path>.
+	// 3. volume has no items, volumeMount has a SubPath: the file path is the
+	//    mountPath itself.
+	// 4. volume has no items, no SubPath: one file per ConfigMap key at
+	//    mountPath/<key>.
+	var configs []model.ConfigManagement
+	cmMap := make(map[string]corev1.ConfigMap)
+	cmList, err := c.clientset.CoreV1().ConfigMaps(parameter.Namespace).List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		logrus.Errorf("Failed to get ConfigMap%v", err)
+	}
+	if cmList != nil {
+		for _, cm := range cmList.Items {
+			cmMap[cm.Name] = cm
+		}
+	}
+	//ConfigMaps shipped alongside the yaml take part in the lookup too. The
+	//original appended them to cmList.Items AFTER the map was built, so they
+	//were never found.
+	for _, cm := range parameter.CMs {
+		cmMap[cm.Name] = cm
+	}
+	for _, volume := range parameter.Template.Spec.Volumes {
+		if volume.ConfigMap == nil {
+			continue
+		}
+		//A missing ConfigMap yields a nil Data map; reads below return "".
+		cmData := cmMap[volume.ConfigMap.Name].Data
+		mounted := false
+		var index int
+		for i, volumeMount := range container.VolumeMounts {
+			if volume.Name != volumeMount.Name {
+				continue
+			}
+			mounted = true
+			index = i
+			if volume.ConfigMap.Items != nil {
+				if volumeMount.SubPath != "" {
+					configName := ""
+					var mode int32
+					for _, item := range volume.ConfigMap.Items {
+						if item.Path == volumeMount.SubPath {
+							configName = item.Key
+							//item.Mode is optional; the original dereferenced
+							//it unconditionally and could panic.
+							if item.Mode != nil {
+								mode = *item.Mode
+							}
+						}
+					}
+					configs = append(configs, model.ConfigManagement{
+						ConfigName:  configName,
+						ConfigPath:  volumeMount.MountPath,
+						ConfigValue: cmData[configName],
+						Mode:        mode,
+					})
+					continue
+				}
+				for _, item := range volume.ConfigMap.Items {
+					//Join into a fresh variable per item: the original
+					//reassigned the running path, so each file accumulated
+					//all previous item paths.
+					itemPath := path.Join(volumeMount.MountPath, item.Path)
+					var mode int32
+					if item.Mode != nil {
+						mode = *item.Mode
+					}
+					configs = append(configs, model.ConfigManagement{
+						ConfigName:  item.Key,
+						ConfigPath:  itemPath,
+						ConfigValue: cmData[item.Key],
+						Mode:        mode,
+					})
+				}
+			} else {
+				//NOTE(review): 777 is decimal here, not octal 0777 — confirm
+				//the intended representation of the default file mode.
+				mode := int32(777)
+				if volume.ConfigMap.DefaultMode != nil {
+					mode = *volume.ConfigMap.DefaultMode
+				}
+				if volumeMount.SubPath != "" {
+					configs = append(configs, model.ConfigManagement{
+						ConfigName:  volumeMount.SubPath,
+						ConfigPath:  volumeMount.MountPath,
+						ConfigValue: cmData[volumeMount.SubPath],
+						Mode:        mode,
+					})
+					continue
+				}
+				for key, val := range cmData {
+					//Use the nil-guarded mode; the original dereferenced
+					//DefaultMode here and could panic.
+					configs = append(configs, model.ConfigManagement{
+						ConfigName:  key,
+						ConfigPath:  path.Join(volumeMount.MountPath, key),
+						ConfigValue: val,
+						Mode:        mode,
+					})
+				}
+			}
+		}
+		if !mounted {
+			logrus.Warningf("configmap type resource %v is not mounted in volumemount", volume.ConfigMap.Name)
+			continue
+		}
+		//Drop the handled mount so it is not duplicated in the volumeMounts
+		//attribute below (only the last matching mount, as before).
+		container.VolumeMounts = append(container.VolumeMounts[:index], container.VolumeMounts[index+1:]...)
+	}
+
+	//TelescopicManagement: HPA settings. CPU/memory thresholds may also be
+	//carried in the Annotations["autoscaling.alpha.kubernetes.io/metrics"]
+	//JSON string, decoded into a small helper struct below.
+	HPAList, err := c.clientset.AutoscalingV1().HorizontalPodAutoscalers(parameter.Namespace).List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		logrus.Errorf("Failed to get HorizontalPodAutoscalers list:%v", err)
+	}
+	hpas := parameter.HPAs
+	if HPAList != nil {
+		hpas = append(HPAList.Items, parameter.HPAs...)
+	}
+	var t model.TelescopicManagement
+	for _, hpa := range hpas {
+		if (hpa.Spec.ScaleTargetRef.Kind != model.Deployment && hpa.Spec.ScaleTargetRef.Kind != model.StateFulSet) || hpa.Spec.ScaleTargetRef.Name != parameter.Name {
+			t.Enable = false
+			continue
+		}
+		t.Enable = true
+		//MinReplicas is optional in the HPA spec (defaults to 1); the
+		//original dereferenced it unconditionally.
+		t.MinReplicas = 1
+		if hpa.Spec.MinReplicas != nil {
+			t.MinReplicas = *hpa.Spec.MinReplicas
+		}
+		t.MaxReplicas = hpa.Spec.MaxReplicas
+		var cpuormemorys []*dbmodel.TenantServiceAutoscalerRuleMetrics
+		if target := hpa.Spec.TargetCPUUtilizationPercentage; target != nil {
+			cpuormemorys = append(cpuormemorys, &dbmodel.TenantServiceAutoscalerRuleMetrics{
+				MetricsType:       "resource_metrics",
+				MetricsName:       "cpu",
+				MetricTargetType:  "utilization",
+				MetricTargetValue: int(*target),
+			})
+		}
+		if metricsJSON, ok := hpa.Annotations["autoscaling.alpha.kubernetes.io/metrics"]; ok {
+			type com struct {
+				T        string `json:"type"`
+				Resource map[string]interface{}
+			}
+			var metrics []com
+			if err := json.Unmarshal([]byte(metricsJSON), &metrics); err != nil {
+				logrus.Errorf("autoscaling.alpha.kubernetes.io/metrics parsing failed:%v", err)
+			}
+			for _, metric := range metrics {
+				switch metric.Resource["name"] {
+				case "cpu":
+					cpu := fmt.Sprint(metric.Resource["targetAverageValue"])
+					cpuUnit := cpu[len(cpu)-1:]
+					var cpuUsage int
+					if cpuUnit == "m" {
+						cpuUsage, _ = strconv.Atoi(cpu[:len(cpu)-1])
+					}
+					if cpuUnit == "g" || cpuUnit == "G" {
+						cpuUsage, _ = strconv.Atoi(cpu[:len(cpu)-1])
+						cpuUsage = cpuUsage * 1024
+					}
+					cpuormemorys = append(cpuormemorys, &dbmodel.TenantServiceAutoscalerRuleMetrics{
+						MetricsType:       "resource_metrics",
+						MetricsName:       "cpu",
+						MetricTargetType:  "average_value",
+						MetricTargetValue: cpuUsage,
+					})
+				case "memory":
+					memory := fmt.Sprint(metric.Resource["targetAverageValue"])
+					//Take the LAST character as the unit; the original sliced
+					//off the last character and kept the remainder.
+					memoryUnit := memory[len(memory)-1:]
+					var memoryUsage int
+					if memoryUnit == "m" {
+						memoryUsage, _ = strconv.Atoi(memory[:len(memory)-1])
+					}
+					if memoryUnit == "g" || memoryUnit == "G" {
+						memoryUsage, _ = strconv.Atoi(memory[:len(memory)-1])
+						memoryUsage = memoryUsage * 1024
+					}
+					//Record as "memory"; the original mislabeled this "cpu".
+					cpuormemorys = append(cpuormemorys, &dbmodel.TenantServiceAutoscalerRuleMetrics{
+						MetricsType:       "resource_metrics",
+						MetricsName:       "memory",
+						MetricTargetType:  "average_value",
+						MetricTargetValue: memoryUsage,
+					})
+				}
+			}
+		}
+		t.CPUOrMemory = cpuormemorys
+	}
+
+	//HealthyCheckManagement: a liveness probe takes precedence over a
+	//readiness probe; both map onto the same Rainbond structure.
+	var hcm model.HealthyCheckManagement
+	fillProbe := func(probe *corev1.Probe, mode string) {
+		var httpHeaders []string
+		if probe.HTTPGet != nil {
+			for _, httpHeader := range probe.HTTPGet.HTTPHeaders {
+				httpHeaders = append(httpHeaders, httpHeader.Name+"="+httpHeader.Value)
+			}
+			hcm.DetectionMethod = strings.ToLower(string(probe.HTTPGet.Scheme))
+			hcm.Path = probe.HTTPGet.Path
+			hcm.Port = int(probe.HTTPGet.Port.IntVal)
+		}
+		hcm.Status = 1
+		if probe.Exec != nil {
+			hcm.Command = strings.Join(probe.Exec.Command, " ")
+		}
+		hcm.HTTPHeader = strings.Join(httpHeaders, ",")
+		hcm.Mode = mode
+		hcm.InitialDelaySecond = int(probe.InitialDelaySeconds)
+		hcm.PeriodSecond = int(probe.PeriodSeconds)
+		hcm.TimeoutSecond = int(probe.TimeoutSeconds)
+		hcm.FailureThreshold = int(probe.FailureThreshold)
+		hcm.SuccessThreshold = int(probe.SuccessThreshold)
+	}
+	if livenessProbe := container.LivenessProbe; livenessProbe != nil {
+		fillProbe(livenessProbe, "liveness")
+	} else if readinessProbe := container.ReadinessProbe; readinessProbe != nil {
+		fillProbe(readinessProbe, "readiness")
+	}
+
+	//ComponentK8sAttributes: kubernetes-native settings Rainbond cannot model
+	//directly are preserved as serialized attributes.
+	var attributes []*dbmodel.ComponentK8sAttributes
+	addAttribute := func(name, saveType, label string, value interface{}) {
+		serialized, err := ObjectToJSONORYaml(saveType, value)
+		if err != nil {
+			logrus.Errorf("deployment:%v %v %v", parameter.Name, label, err)
+			return
+		}
+		attributes = append(attributes, &dbmodel.ComponentK8sAttributes{
+			Name:           name,
+			SaveType:       saveType,
+			AttributeValue: serialized,
+		})
+	}
+	//Only ValueFrom env vars remain in the container at this point.
+	if len(container.Env) > 0 {
+		addAttribute(dbmodel.K8sAttributeNameENV, "yaml", "env", container.Env)
+	}
+	if parameter.Template.Spec.Volumes != nil {
+		addAttribute(dbmodel.K8sAttributeNameVolumes, "yaml", "volumes", parameter.Template.Spec.Volumes)
+	}
+	if container.VolumeMounts != nil {
+		addAttribute(dbmodel.K8sAttributeNameVolumeMounts, "yaml", "volumeMounts", container.VolumeMounts)
+	}
+	if parameter.Template.Spec.ServiceAccountName != "" {
+		attributes = append(attributes, &dbmodel.ComponentK8sAttributes{
+			Name:           dbmodel.K8sAttributeNameServiceAccountName,
+			SaveType:       "string",
+			AttributeValue: parameter.Template.Spec.ServiceAccountName,
+		})
+	}
+	if parameter.RsLabel != nil {
+		addAttribute(dbmodel.K8sAttributeNameLabels, "json", "labels", parameter.RsLabel)
+	}
+	if parameter.Template.Spec.NodeSelector != nil {
+		addAttribute(dbmodel.K8sAttributeNameNodeSelector, "json", "nodeSelector", parameter.Template.Spec.NodeSelector)
+	}
+	if parameter.Template.Spec.Tolerations != nil {
+		addAttribute(dbmodel.K8sAttributeNameTolerations, "yaml", "tolerations", parameter.Template.Spec.Tolerations)
+	}
+	if parameter.Template.Spec.Affinity != nil {
+		addAttribute(dbmodel.K8sAttributeNameAffinity, "yaml", "affinity", parameter.Template.Spec.Affinity)
+	}
+	if securityContext := container.SecurityContext; securityContext != nil && securityContext.Privileged != nil {
+		attributes = append(attributes, &dbmodel.ComponentK8sAttributes{
+			Name:           dbmodel.K8sAttributeNamePrivileged,
+			SaveType:       "string",
+			AttributeValue: strconv.FormatBool(*securityContext.Privileged),
+		})
+	}
+
+	*parameter.ComponentsCR = append(*parameter.ComponentsCR, model.ConvertResource{
+		ComponentsName:                   parameter.Name,
+		BasicManagement:                  parameter.Basic,
+		PortManagement:                   ps,
+		ENVManagement:                    envs,
+		ConfigManagement:                 configs,
+		TelescopicManagement:             t,
+		HealthyCheckManagement:           hcm,
+		ComponentK8sAttributesManagement: attributes,
+	})
+}
+
+//ObjectToJSONORYaml serializes data according to changeType: "json" returns
+//a JSON string, anything else returns YAML. A nil input yields "".
+func ObjectToJSONORYaml(changeType string, data interface{}) (string, error) {
+	if data == nil {
+		return "", nil
+	}
+	//Everything goes through JSON first; YAML output is converted from it.
+	jsonBytes, err := json.Marshal(data)
+	if err != nil {
+		return "", fmt.Errorf("json serialization failed err:%v", err)
+	}
+	if changeType == "json" {
+		return string(jsonBytes), nil
+	}
+	yamlBytes, err := yaml.JSONToYAML(jsonBytes)
+	if err != nil {
+		return "", fmt.Errorf("yaml serialization failed err:%v", err)
+	}
+	return string(yamlBytes), nil
+}
diff --git a/api/handler/service.go b/api/handler/service.go
index 78b9043d0..64156b6e5 100644
--- a/api/handler/service.go
+++ b/api/handler/service.go
@@ -937,6 +937,9 @@ func (s *ServiceAction) ServiceUpdate(sc map[string]interface{}) error {
ts.ExtendMethod = extendMethod
ts.ServiceType = extendMethod
}
+ if js, ok := sc["job_strategy"].(string); ok {
+ ts.JobStrategy = js
+ }
//update component
if err := db.GetManager().TenantServiceDao().UpdateModel(ts); err != nil {
logrus.Errorf("update service error, %v", err)
@@ -2914,6 +2917,28 @@ func (s *ServiceAction) SyncComponentEndpoints(tx *gorm.DB, components []*api_mo
return db.GetManager().ThirdPartySvcDiscoveryCfgDaoTransactions(tx).CreateOrUpdate3rdSvcDiscoveryCfgInBatch(thirdPartySvcDiscoveryCfgs)
}
+// SyncComponentK8sAttributes deletes the stale attributes of the submitted
+// components and re-creates the new set in one batch, inside tx. Components
+// without attributes are left untouched.
+func (s *ServiceAction) SyncComponentK8sAttributes(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error {
+	var (
+		componentIDs  []string
+		k8sAttributes []*dbmodel.ComponentK8sAttributes
+	)
+	for _, component := range components {
+		// len() of a nil slice is 0, so the former `== nil ||` was redundant.
+		if len(component.ComponentK8sAttributes) == 0 {
+			continue
+		}
+		componentIDs = append(componentIDs, component.ComponentBase.ComponentID)
+		for _, k8sAttribute := range component.ComponentK8sAttributes {
+			k8sAttributes = append(k8sAttributes, k8sAttribute.DbModel(app.TenantID, component.ComponentBase.ComponentID))
+		}
+	}
+
+	if err := db.GetManager().ComponentK8sAttributeDaoTransactions(tx).DeleteByComponentIDs(componentIDs); err != nil {
+		return err
+	}
+	return db.GetManager().ComponentK8sAttributeDaoTransactions(tx).CreateOrUpdateAttributesInBatch(k8sAttributes)
+}
+
// Log returns the logs reader for a container in a pod, a pod or a component.
func (s *ServiceAction) Log(w http.ResponseWriter, r *http.Request, component *dbmodel.TenantServices, podName, containerName string, follow bool) error {
// If podName and containerName is missing, return the logs reader for the component
@@ -2985,6 +3010,8 @@ func TransStatus(eStatus string) string {
return "未部署"
case "deployed":
return "已部署"
+ case "succeeded":
+ return "已完成"
}
return ""
}
diff --git a/api/handler/service_handler.go b/api/handler/service_handler.go
index 98a771a51..d2687802c 100644
--- a/api/handler/service_handler.go
+++ b/api/handler/service_handler.go
@@ -90,6 +90,10 @@ type ServiceHandler interface {
DeleteServiceMonitor(tenantID, serviceID, name string) (*dbmodel.TenantServiceMonitor, error)
AddServiceMonitor(tenantID, serviceID string, add api_model.AddServiceMonitorRequestStruct) (*dbmodel.TenantServiceMonitor, error)
+ CreateK8sAttribute(tenantID, componentID string, k8sAttr *api_model.ComponentK8sAttribute) error
+ UpdateK8sAttribute(componentID string, k8sAttributes *api_model.ComponentK8sAttribute) error
+ DeleteK8sAttribute(componentID, name string) error
+
SyncComponentBase(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentMonitors(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentPorts(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
@@ -103,6 +107,7 @@ type ServiceHandler interface {
SyncComponentPlugins(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentScaleRules(tx *gorm.DB, components []*api_model.Component) error
SyncComponentEndpoints(tx *gorm.DB, components []*api_model.Component) error
+ SyncComponentK8sAttributes(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
Log(w http.ResponseWriter, r *http.Request, component *dbmodel.TenantServices, podName, containerName string, follow bool) error
}
diff --git a/api/handler/service_operation.go b/api/handler/service_operation.go
index d87072b00..676c2a462 100644
--- a/api/handler/service_operation.go
+++ b/api/handler/service_operation.go
@@ -319,6 +319,7 @@ func (o *OperationHandler) buildFromImage(r *model.ComponentBuildReq, service *d
func (o *OperationHandler) buildFromSourceCode(r *model.ComponentBuildReq, service *dbmodel.TenantServices) error {
if r.CodeInfo.RepoURL == "" || r.CodeInfo.Branch == "" || r.DeployVersion == "" {
+ logrus.Infof("r.CodeInfo.RepoURL:%v, r.CodeInfo.Branch:%v, r.DeployVersion:%v", r.CodeInfo.RepoURL, r.CodeInfo.Branch, r.DeployVersion)
return fmt.Errorf("build from code failure, args error")
}
body := make(map[string]interface{})
diff --git a/api/handler/yaml_resource.go b/api/handler/yaml_resource.go
new file mode 100644
index 000000000..4cc9a81cb
--- /dev/null
+++ b/api/handler/yaml_resource.go
@@ -0,0 +1,422 @@
+package handler
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path"
+	"path/filepath"
+	"strings"
+
+	api_model "github.com/goodrain/rainbond/api/model"
+	"github.com/goodrain/rainbond/api/util"
+	"github.com/goodrain/rainbond/db"
+	dbmodel "github.com/goodrain/rainbond/db/model"
+	"github.com/jinzhu/gorm"
+	"github.com/sirupsen/logrus"
+	appv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	batchv1 "k8s.io/api/batch/v1"
+	"k8s.io/api/batch/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/yaml"
+	yamlt "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/client-go/dynamic"
+)
+
+//AppYamlResourceName parses the uploaded yaml files and reports, per file,
+//the resource names they declare grouped by kind; the "app_resource" key
+//aggregates everything that decoded successfully.
+func (c *clusterAction) AppYamlResourceName(yamlResource api_model.YamlResource) (map[string]api_model.LabelResource, *util.APIHandleError) {
+	logrus.Infof("AppYamlResourceName begin")
+	fileResource := make(map[string]api_model.LabelResource)
+	k8sResourceObjects := c.YamlToResource(yamlResource)
+	var DeployNames, JobNames, CJNames, SFSNames, RoleNames, HPANames, RBNames, SANames, SecretNames, ServiceNames, CMNames, NetworkPolicyNames, IngressNames, PVCNames []string
+	defaultResource := make(map[string][]string)
+	for _, k8sResourceObject := range k8sResourceObjects {
+		//A file that failed to decode is reported with its error as status.
+		if k8sResourceObject.Error != "" {
+			fileResource[k8sResourceObject.FileName] = api_model.LabelResource{
+				Status: k8sResourceObject.Error,
+			}
+			continue
+		}
+		for _, buildResource := range k8sResourceObject.BuildResources {
+			name := buildResource.Resource.GetName()
+			switch buildResource.Resource.GetKind() {
+			case api_model.Deployment:
+				DeployNames = append(DeployNames, name)
+			case api_model.Job:
+				JobNames = append(JobNames, name)
+			case api_model.CronJob:
+				CJNames = append(CJNames, name)
+			case api_model.StateFulSet:
+				SFSNames = append(SFSNames, name)
+			case api_model.Role:
+				RoleNames = append(RoleNames, name)
+			case api_model.HorizontalPodAutoscaler:
+				HPANames = append(HPANames, name)
+			case api_model.RoleBinding:
+				RBNames = append(RBNames, name)
+			case api_model.ServiceAccount:
+				SANames = append(SANames, name)
+			case api_model.Secret:
+				SecretNames = append(SecretNames, name)
+			case api_model.Service:
+				ServiceNames = append(ServiceNames, name)
+			case api_model.ConfigMap:
+				CMNames = append(CMNames, name)
+			case api_model.NetworkPolicy:
+				NetworkPolicyNames = append(NetworkPolicyNames, name)
+			case api_model.Ingress:
+				IngressNames = append(IngressNames, name)
+			case api_model.PVC:
+				PVCNames = append(PVCNames, name)
+			default:
+				//append on a missing key starts from the nil slice, so no
+				//explicit existence check is needed.
+				defaultResource[buildResource.Resource.GetKind()] = append(defaultResource[buildResource.Resource.GetKind()], name)
+			}
+		}
+	}
+	fileResource["app_resource"] = api_model.LabelResource{
+		UnSupport: defaultResource,
+		Workloads: api_model.WorkLoadsResource{
+			Deployments:  DeployNames,
+			Jobs:         JobNames,
+			CronJobs:     CJNames,
+			StateFulSets: SFSNames,
+		},
+		Others: api_model.OtherResource{
+			Services:        ServiceNames,
+			PVC:             PVCNames,
+			Ingresses:       IngressNames,
+			NetworkPolicies: NetworkPolicyNames,
+			ConfigMaps:      CMNames,
+			Secrets:         SecretNames,
+			//Fixed copy-paste slips: service accounts and role bindings were
+			//previously reported from the Service and Role name lists.
+			ServiceAccounts:          SANames,
+			RoleBindings:             RBNames,
+			HorizontalPodAutoscalers: HPANames,
+			Roles:                    RoleNames,
+		},
+		Status: "",
+	}
+	logrus.Infof("AppYamlResourceName end")
+	return fileResource, nil
+}
+
+//AppYamlResourceDetailed expands the uploaded yaml documents into the import
+//model: workloads (Deployment/Job/CronJob/StatefulSet) become ConvertResource
+//components, everything else is recorded as a raw K8sResource. When
+//yamlImport is true, the non-workload resources are also created in the
+//cluster immediately.
+func (c *clusterAction) AppYamlResourceDetailed(yamlResource api_model.YamlResource, yamlImport bool) (api_model.ApplicationResource, *util.APIHandleError) {
+	logrus.Infof("AppYamlResourceDetailed begin")
+	k8sResourceObjects := c.YamlToResource(yamlResource)
+	var K8SResource []dbmodel.K8sResource
+	var ConvertResource []api_model.ConvertResource
+	for _, k8sResourceObject := range k8sResourceObjects {
+		// Files that failed to decode carry their error in the object and are
+		// skipped here (YamlToResource already reported them).
+		if k8sResourceObject.Error != "" {
+			continue
+		}
+		// First pass over this file: collect ConfigMaps and HPAs so the
+		// workload conversion below can resolve mounts and autoscaling rules.
+		var cms []corev1.ConfigMap
+		var hpas []autoscalingv1.HorizontalPodAutoscaler
+		for _, buildResource := range k8sResourceObject.BuildResources {
+			if buildResource.Resource.GetKind() == api_model.ConfigMap {
+				var cm corev1.ConfigMap
+				// Marshal/Unmarshal round-trip converts the unstructured
+				// object into its typed form.
+				// NOTE(review): the Unmarshal error is ignored; a malformed
+				// document silently yields a zero-valued ConfigMap.
+				cmJSON, _ := json.Marshal(buildResource.Resource)
+				json.Unmarshal(cmJSON, &cm)
+				cms = append(cms, cm)
+				continue
+			}
+			if buildResource.Resource.GetKind() == api_model.HorizontalPodAutoscaler {
+				var hpa autoscalingv1.HorizontalPodAutoscaler
+				cmJSON, _ := json.Marshal(buildResource.Resource)
+				json.Unmarshal(cmJSON, &hpa)
+				hpas = append(hpas, hpa)
+			}
+		}
+
+		// Second pass: convert workloads, record everything else.
+		for _, buildResource := range k8sResourceObject.BuildResources {
+			errorOverview := "创建成功"
+			state := 1
+			switch buildResource.Resource.GetKind() {
+			case api_model.Deployment:
+				deployJSON, _ := json.Marshal(buildResource.Resource)
+				var deployObject appv1.Deployment
+				json.Unmarshal(deployJSON, &deployObject)
+				// NOTE(review): assumes at least one container — an empty
+				// Containers slice would panic here; confirm upstream
+				// validation guarantees this.
+				basic := api_model.BasicManagement{
+					ResourceType: api_model.Deployment,
+					Replicas:     deployObject.Spec.Replicas,
+					// Limits.Memory() is bytes; converted to MiB here.
+					Memory:       deployObject.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+					CPU:          deployObject.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+					Image:        deployObject.Spec.Template.Spec.Containers[0].Image,
+					Cmd:          strings.Join(append(deployObject.Spec.Template.Spec.Containers[0].Command, deployObject.Spec.Template.Spec.Containers[0].Args...), " "),
+				}
+				parameter := api_model.YamlResourceParameter{
+					ComponentsCR: &ConvertResource,
+					Basic:        basic,
+					Template:     deployObject.Spec.Template,
+					Namespace:    yamlResource.Namespace,
+					Name:         buildResource.Resource.GetName(),
+					RsLabel:      deployObject.Labels,
+					HPAs:         hpas,
+					CMs:          cms,
+				}
+				// Appends the converted component to ConvertResource.
+				c.PodTemplateSpecResource(parameter)
+			case api_model.Job:
+				jobJSON, _ := json.Marshal(buildResource.Resource)
+				var jobObject batchv1.Job
+				json.Unmarshal(jobJSON, &jobObject)
+				basic := api_model.BasicManagement{
+					ResourceType: api_model.Job,
+					// A Job has no Replicas; Completions plays that role.
+					Replicas:     jobObject.Spec.Completions,
+					Memory:       jobObject.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+					CPU:          jobObject.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+					Image:        jobObject.Spec.Template.Spec.Containers[0].Image,
+					Cmd:          strings.Join(append(jobObject.Spec.Template.Spec.Containers[0].Command, jobObject.Spec.Template.Spec.Containers[0].Args...), " "),
+				}
+				parameter := api_model.YamlResourceParameter{
+					ComponentsCR: &ConvertResource,
+					Basic:        basic,
+					Template:     jobObject.Spec.Template,
+					Namespace:    yamlResource.Namespace,
+					Name:         buildResource.Resource.GetName(),
+					RsLabel:      jobObject.Labels,
+					HPAs:         hpas,
+					CMs:          cms,
+				}
+				c.PodTemplateSpecResource(parameter)
+			case api_model.CronJob:
+				cjJSON, _ := json.Marshal(buildResource.Resource)
+				var cjObject v1beta1.CronJob
+				json.Unmarshal(cjJSON, &cjObject)
+				basic := api_model.BasicManagement{
+					ResourceType: api_model.CronJob,
+					// The pod template lives one level deeper, inside the
+					// cron job's JobTemplate.
+					Replicas:     cjObject.Spec.JobTemplate.Spec.Completions,
+					Memory:       cjObject.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+					CPU:          cjObject.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+					Image:        cjObject.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image,
+					Cmd:          strings.Join(append(cjObject.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command, cjObject.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Args...), " "),
+				}
+				parameter := api_model.YamlResourceParameter{
+					ComponentsCR: &ConvertResource,
+					Basic:        basic,
+					Template:     cjObject.Spec.JobTemplate.Spec.Template,
+					Namespace:    yamlResource.Namespace,
+					Name:         buildResource.Resource.GetName(),
+					RsLabel:      cjObject.Labels,
+					HPAs:         hpas,
+					CMs:          cms,
+				}
+				c.PodTemplateSpecResource(parameter)
+			case api_model.StateFulSet:
+				sfsJSON, _ := json.Marshal(buildResource.Resource)
+				var sfsObject appv1.StatefulSet
+				json.Unmarshal(sfsJSON, &sfsObject)
+				basic := api_model.BasicManagement{
+					ResourceType: api_model.StateFulSet,
+					Replicas:     sfsObject.Spec.Replicas,
+					Memory:       sfsObject.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value() / 1024 / 1024,
+					CPU:          sfsObject.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().Value(),
+					Image:        sfsObject.Spec.Template.Spec.Containers[0].Image,
+					Cmd:          strings.Join(append(sfsObject.Spec.Template.Spec.Containers[0].Command, sfsObject.Spec.Template.Spec.Containers[0].Args...), " "),
+				}
+				parameter := api_model.YamlResourceParameter{
+					ComponentsCR: &ConvertResource,
+					Basic:        basic,
+					Template:     sfsObject.Spec.Template,
+					Namespace:    yamlResource.Namespace,
+					Name:         buildResource.Resource.GetName(),
+					RsLabel:      sfsObject.Labels,
+					HPAs:         hpas,
+					CMs:          cms,
+				}
+				c.PodTemplateSpecResource(parameter)
+			default:
+				// Non-workload kinds: optionally create them in the cluster,
+				// then record the (possibly server-populated) object as a raw
+				// kubernetes resource of the application.
+				if yamlImport {
+					resource, err := c.ResourceCreate(buildResource, yamlResource.Namespace)
+					if err != nil {
+						errorOverview = err.Error()
+						state = api_model.CreateError
+					} else {
+						buildResource.Resource = resource
+					}
+				}
+				kubernetesResourcesYAML, err := ObjectToJSONORYaml("yaml", buildResource.Resource)
+				if err != nil {
+					logrus.Errorf("namespace:%v %v:%v error: %v", yamlResource.Namespace, buildResource.Resource.GetKind(), buildResource.Resource.GetName(), err)
+				}
+				K8SResource = append(K8SResource, dbmodel.K8sResource{
+					Name:          buildResource.Resource.GetName(),
+					Kind:          buildResource.Resource.GetKind(),
+					Content:       kubernetesResourcesYAML,
+					State:         state,
+					ErrorOverview: errorOverview,
+				})
+			}
+		}
+
+	}
+	logrus.Infof("AppYamlResourceDetailed end")
+	return api_model.ApplicationResource{
+		K8SResource,
+		ConvertResource,
+	}, nil
+}
+
+//AppYamlResourceImport turns the parsed yaml resources into a Rainbond
+//application: raw kubernetes resources plus converted components, created
+//inside a single database transaction so a failure rolls everything back.
+func (c *clusterAction) AppYamlResourceImport(yamlResource api_model.YamlResource, components api_model.ApplicationResource) (api_model.AppComponent, *util.APIHandleError) {
+	logrus.Infof("AppYamlResourceImport begin")
+	app, err := db.GetManager().ApplicationDao().GetAppByID(yamlResource.AppID)
+	if err != nil {
+		return api_model.AppComponent{}, &util.APIHandleError{Code: 400, Err: fmt.Errorf("GetAppByID error %v", err)}
+	}
+	var appComponent api_model.AppComponent
+	txErr := db.GetManager().DB().Transaction(func(tx *gorm.DB) error {
+		k8sResource, err := c.CreateK8sResource(tx, components.KubernetesResources, app.AppID)
+		if err != nil {
+			logrus.Errorf("create K8sResources err:%v", err)
+			return &util.APIHandleError{Code: 400, Err: fmt.Errorf("create K8sResources err:%v", err)}
+		}
+		var componentAttributes []api_model.ComponentAttributes
+		for _, componentResource := range components.ConvertResource {
+			component, err := c.CreateComponent(app, yamlResource.TenantID, componentResource, yamlResource.Namespace, true)
+			if err != nil {
+				logrus.Errorf("%v", err)
+				return &util.APIHandleError{Code: 400, Err: fmt.Errorf("create app error:%v", err)}
+			}
+			//Attach the component's sub-resources before recording it.
+			c.createENV(componentResource.ENVManagement, component)
+			c.createConfig(componentResource.ConfigManagement, component)
+			c.createPort(componentResource.PortManagement, component)
+			componentResource.TelescopicManagement.RuleID = c.createTelescopic(componentResource.TelescopicManagement, component)
+			componentResource.HealthyCheckManagement.ProbeID = c.createHealthyCheck(componentResource.HealthyCheckManagement, component)
+			c.createK8sAttributes(componentResource.ComponentK8sAttributesManagement, yamlResource.TenantID, component)
+			componentAttributes = append(componentAttributes, api_model.ComponentAttributes{
+				TS:                     component,
+				Image:                  componentResource.BasicManagement.Image,
+				Cmd:                    componentResource.BasicManagement.Cmd,
+				ENV:                    componentResource.ENVManagement,
+				Config:                 componentResource.ConfigManagement,
+				Port:                   componentResource.PortManagement,
+				Telescopic:             componentResource.TelescopicManagement,
+				HealthyCheck:           componentResource.HealthyCheckManagement,
+				ComponentK8sAttributes: componentResource.ComponentK8sAttributesManagement,
+			})
+		}
+		appComponent = api_model.AppComponent{
+			App:          app,
+			K8sResources: k8sResource,
+			Component:    componentAttributes,
+		}
+		return nil
+	})
+	if txErr != nil {
+		return api_model.AppComponent{}, &util.APIHandleError{Code: 400, Err: fmt.Errorf("app yaml resource import error:%v", txErr)}
+	}
+	logrus.Infof("AppYamlResourceImport end")
+	return appComponent, nil
+}
+
+//YamlToResource reads every yaml file uploaded for the event and decodes each
+//document into an unstructured kubernetes object, grouped per file. Decode
+//failures are reported once per file via K8sResourceObject.Error.
+func (c *clusterAction) YamlToResource(yamlResource api_model.YamlResource) []api_model.K8sResourceObject {
+	yamlDirectoryPath := path.Join("/grdata/package_build/temp/events", yamlResource.EventID, "*")
+	yamlFilesPath, _ := filepath.Glob(yamlDirectoryPath)
+	var fileBuildResourceList []api_model.K8sResourceObject
+	//The dynamic client does not depend on the file being processed; create
+	//it once instead of once per file as before.
+	dc, err := dynamic.NewForConfig(c.config)
+	if err != nil {
+		logrus.Errorf("%v", err)
+		for _, yamlFilePath := range yamlFilesPath {
+			fileBuildResourceList = append(fileBuildResourceList, api_model.K8sResourceObject{
+				FileName:       filepath.Base(yamlFilePath),
+				BuildResources: nil,
+				Error:          err.Error(),
+			})
+		}
+		return fileBuildResourceList
+	}
+	for _, yamlFilePath := range yamlFilesPath {
+		fileName := filepath.Base(yamlFilePath)
+		yamlFileBytes, err := ioutil.ReadFile(yamlFilePath)
+		if err != nil {
+			logrus.Errorf("%v", err)
+			fileBuildResourceList = append(fileBuildResourceList, api_model.K8sResourceObject{
+				FileName:       fileName,
+				BuildResources: nil,
+				Error:          err.Error(),
+			})
+			continue
+		}
+		decoder := yamlt.NewYAMLOrJSONDecoder(bytes.NewReader(yamlFileBytes), 1000)
+		var buildResourceList []api_model.BuildResource
+		decodeErr := ""
+		for {
+			var rawObj runtime.RawExtension
+			if err = decoder.Decode(&rawObj); err != nil {
+				//io.EOF is the normal end of the document stream; matching on
+				//the error value is sturdier than comparing err.Error().
+				if errors.Is(err, io.EOF) {
+					break
+				}
+				logrus.Errorf("%v", err)
+				decodeErr = err.Error()
+				break
+			}
+			obj, gvk, err := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme).Decode(rawObj.Raw, nil, nil)
+			if err != nil {
+				logrus.Errorf("%v", err)
+				decodeErr = err.Error()
+				break
+			}
+			unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+			if err != nil {
+				logrus.Errorf("%v", err)
+				decodeErr = err.Error()
+				break
+			}
+			buildResourceList = append(buildResourceList, api_model.BuildResource{
+				Resource:      &unstructured.Unstructured{Object: unstructuredMap},
+				State:         api_model.CreateError,
+				ErrorOverview: "",
+				Dri:           nil,
+				DC:            dc,
+				GVK:           gvk,
+			})
+		}
+		//Emit exactly one entry per file. The original appended an entry when
+		//a document failed to decode and then unconditionally appended a
+		//second, error-free entry for the same file.
+		if decodeErr != "" {
+			fileBuildResourceList = append(fileBuildResourceList, api_model.K8sResourceObject{
+				FileName:       fileName,
+				BuildResources: nil,
+				Error:          decodeErr,
+			})
+			continue
+		}
+		fileBuildResourceList = append(fileBuildResourceList, api_model.K8sResourceObject{
+			FileName:       fileName,
+			BuildResources: buildResourceList,
+			Error:          "",
+		})
+	}
+	return fileBuildResourceList
+}
+
+//ResourceCreate creates the decoded resource in the cluster through the
+//dynamic client. Namespaced resources are forced into the given namespace;
+//cluster-scoped resources are created as-is.
+func (c *clusterAction) ResourceCreate(buildResource api_model.BuildResource, namespace string) (*unstructured.Unstructured, error) {
+	mapping, err := c.mapper.RESTMapping(buildResource.GVK.GroupKind(), buildResource.GVK.Version)
+	if err != nil {
+		logrus.Errorf("%v", err)
+		return nil, err
+	}
+	switch mapping.Scope.Name() {
+	case meta.RESTScopeNameNamespace:
+		buildResource.Resource.SetNamespace(namespace)
+		buildResource.Dri = buildResource.DC.Resource(mapping.Resource).Namespace(buildResource.Resource.GetNamespace())
+	default:
+		buildResource.Dri = buildResource.DC.Resource(mapping.Resource)
+	}
+	created, err := buildResource.Dri.Create(context.Background(), buildResource.Resource, metav1.CreateOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return created, nil
+}
diff --git a/api/model/component.go b/api/model/component.go
index ac3435b4d..95e856ded 100644
--- a/api/model/component.go
+++ b/api/model/component.go
@@ -241,23 +241,24 @@ func (e *ComponentEnv) DbModel(tenantID, componentID string) *dbmodel.TenantServ
// Component All attributes related to the component
type Component struct {
- ComponentBase ComponentBase `json:"component_base"`
- HTTPRules []AddHTTPRuleStruct `json:"http_rules"`
- TCPRules []AddTCPRuleStruct `json:"tcp_rules"`
- HTTPRuleConfigs []HTTPRuleConfig `json:"http_rule_configs"`
- Monitors []AddServiceMonitorRequestStruct `json:"monitors"`
- Ports []TenantServicesPort `json:"ports"`
- Relations []TenantComponentRelation `json:"relations"`
- Envs []ComponentEnv `json:"envs"`
- Probes []ServiceProbe `json:"probes"`
- AppConfigGroupRels []AppConfigGroupRelations `json:"app_config_groups"`
- Labels []ComponentLabel `json:"labels"`
- Plugins []ComponentPlugin `json:"plugins"`
- AutoScaleRule AutoScalerRule `json:"auto_scale_rule"`
- ConfigFiles []ComponentConfigFile `json:"config_files"`
- VolumeRelations []VolumeRelation `json:"volume_relations"`
- Volumes []ComponentVolume `json:"volumes"`
- Endpoint *Endpoints `json:"endpoint"`
+ ComponentBase ComponentBase `json:"component_base"`
+ HTTPRules []AddHTTPRuleStruct `json:"http_rules"`
+ TCPRules []AddTCPRuleStruct `json:"tcp_rules"`
+ HTTPRuleConfigs []HTTPRuleConfig `json:"http_rule_configs"`
+ Monitors []AddServiceMonitorRequestStruct `json:"monitors"`
+ Ports []TenantServicesPort `json:"ports"`
+ Relations []TenantComponentRelation `json:"relations"`
+ Envs []ComponentEnv `json:"envs"`
+ Probes []ServiceProbe `json:"probes"`
+ AppConfigGroupRels []AppConfigGroupRelations `json:"app_config_groups"`
+ Labels []ComponentLabel `json:"labels"`
+ Plugins []ComponentPlugin `json:"plugins"`
+ AutoScaleRule AutoScalerRule `json:"auto_scale_rule"`
+ ConfigFiles []ComponentConfigFile `json:"config_files"`
+ VolumeRelations []VolumeRelation `json:"volume_relations"`
+ Volumes []ComponentVolume `json:"volumes"`
+ Endpoint *Endpoints `json:"endpoint"`
+ ComponentK8sAttributes []ComponentK8sAttribute `json:"component_k8s_attributes"`
}
// SyncComponentReq -
@@ -265,3 +266,34 @@ type SyncComponentReq struct {
Components []*Component `json:"components"`
DeleteComponentIDs []string `json:"delete_component_ids"`
}
+
+// ComponentK8sAttribute -
+type ComponentK8sAttribute struct {
+ // Name Define the attribute name, which is currently supported
+ // [nodeSelector/labels/tolerations/volumes/serviceAccountName/privileged/affinity/volumeMounts]
+ // The field name should be the same as that in the K8s resource yaml file.
+ Name string `json:"name"`
+
+ // The field type defines how the attribute is stored. Currently, `json/yaml/string` are supported
+ SaveType string `json:"save_type"`
+
+ // Define the attribute value, which is stored in the database.
+ // The value is stored in the database in the form of `json/yaml/string`.
+ AttributeValue string `json:"attribute_value"`
+}
+
+// DbModel return database model
+func (k *ComponentK8sAttribute) DbModel(tenantID, componentID string) *dbmodel.ComponentK8sAttributes {
+ return &dbmodel.ComponentK8sAttributes{
+ TenantID: tenantID,
+ ComponentID: componentID,
+ Name: k.Name,
+ SaveType: k.SaveType,
+ AttributeValue: k.AttributeValue,
+ }
+}
+
+// DeleteK8sAttributeReq -
+type DeleteK8sAttributeReq struct {
+ Name string `json:"name"`
+}
diff --git a/api/model/convert_resource.go b/api/model/convert_resource.go
new file mode 100644
index 000000000..3c816227c
--- /dev/null
+++ b/api/model/convert_resource.go
@@ -0,0 +1,133 @@
+package model
+
+import (
+ "github.com/goodrain/rainbond/db/model"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+//BasicManagement -
+type BasicManagement struct {
+ ResourceType string `json:"resource_type"`
+ Replicas *int32 `json:"replicas"`
+ Image string `json:"image"`
+ Memory int64 `json:"memory"`
+ Cmd string `json:"command"`
+ CPU int64 `json:"cpu"`
+ JobStrategy JobStrategy `json:"job_strategy"`
+}
+
+//PortManagement -
+type PortManagement struct {
+ Port int32 `json:"port"`
+ Protocol string `json:"protocol"`
+ Inner bool `json:"inner"`
+ Outer bool `json:"outer"`
+}
+
+//ENVManagement -
+type ENVManagement struct {
+ ENVKey string `json:"env_key"`
+ ENVValue string `json:"env_value"`
+ ENVExplain string `json:"env_explain"`
+}
+
+//ConfigManagement -
+type ConfigManagement struct {
+ ConfigName string `json:"config_name"`
+ ConfigPath string `json:"config_path"`
+ Mode int32 `json:"mode"`
+ ConfigValue string `json:"config_value"`
+}
+
+//HealthyCheckManagement -
+type HealthyCheckManagement struct {
+ Status int `json:"status"`
+ ProbeID string `json:"probe_id"`
+ Port int `json:"port"`
+ Path string `json:"path"`
+ HTTPHeader string `json:"http_header"`
+ Command string `json:"cmd"`
+ DetectionMethod string `json:"detection_method"`
+ Mode string `json:"mode"`
+ InitialDelaySecond int `json:"initial_delay_second"`
+ PeriodSecond int `json:"period_second"`
+ TimeoutSecond int `json:"timeout_second"`
+ SuccessThreshold int `json:"success_threshold"`
+ FailureThreshold int `json:"failure_threshold"`
+}
+
+//TelescopicManagement -
+type TelescopicManagement struct {
+ Enable bool `json:"enable"`
+ RuleID string `json:"rule_id"`
+ MinReplicas int32 `json:"min_replicas"`
+ MaxReplicas int32 `json:"max_replicas"`
+ CPUOrMemory []*model.TenantServiceAutoscalerRuleMetrics `json:"cpu_or_memory"`
+}
+
+//KubernetesResources -
+type KubernetesResources struct {
+ Name string `json:"name"`
+ Spec v1.ServiceSpec `json:"spec"`
+ Namespace string `json:"namespace"`
+ Labels map[string]string `json:"labels"`
+ Annotations map[string]string `json:"annotations"`
+ Kind string `json:"kind"`
+ APIVersion string `json:"api_version"`
+ GenerateName string `json:"generate_name"`
+ UID types.UID `json:"uid"`
+ ResourceVersion string `json:"resource_version"`
+ Generation int64 `json:"generation"`
+ CreationTimestamp metav1.Time `json:"creation_timestamp"`
+ DeletionTimestamp *metav1.Time `json:"deletion_timestamp"`
+ DeletionGracePeriodSeconds *int64 `json:"deletion_grace_period_seconds"`
+ OwnerReferences []metav1.OwnerReference `json:"owner_references"`
+ Finalizers []string `json:"finalizers"`
+ ClusterName string `json:"cluster_name"`
+}
+
+//ApplicationResource -
+type ApplicationResource struct {
+ KubernetesResources []model.K8sResource `json:"kubernetes_resources"`
+ ConvertResource []ConvertResource `json:"convert_resource"`
+}
+
+//ConvertResource -
+type ConvertResource struct {
+ ComponentsName string `json:"components_name"`
+ BasicManagement BasicManagement `json:"basic_management"`
+ PortManagement []PortManagement `json:"port_management"`
+ ENVManagement []ENVManagement `json:"env_management"`
+ ConfigManagement []ConfigManagement `json:"config_management"`
+ HealthyCheckManagement HealthyCheckManagement `json:"health_check_management"`
+ TelescopicManagement TelescopicManagement `json:"telescopic_management"`
+ ComponentK8sAttributesManagement []*model.ComponentK8sAttributes `json:"component_k8s_attributes_management"`
+}
+
+//ComponentAttributes -
+type ComponentAttributes struct {
+ TS *model.TenantServices `json:"ts"`
+ Image string `json:"image"`
+ Cmd string `json:"cmd"`
+ ENV []ENVManagement `json:"env"`
+ Config []ConfigManagement `json:"config"`
+ Port []PortManagement `json:"port"`
+ Telescopic TelescopicManagement `json:"telescopic"`
+ HealthyCheck HealthyCheckManagement `json:"healthy_check"`
+ ComponentK8sAttributes []*model.ComponentK8sAttributes `json:"component_k8s_attributes"`
+}
+
+//AppComponent -
+type AppComponent struct {
+ App *model.Application `json:"app"`
+ K8sResources []model.K8sResource `json:"k8s_resources"`
+ Component []ComponentAttributes `json:"component"`
+}
+
+//ReturnResourceImport -
+type ReturnResourceImport struct {
+ Tenant *model.Tenants `json:"tenant"`
+ App []AppComponent `json:"app"`
+}
diff --git a/api/model/model.go b/api/model/model.go
index fd8112390..81624d7f9 100644
--- a/api/model/model.go
+++ b/api/model/model.go
@@ -359,6 +359,7 @@ type ServiceStruct struct {
HTTPRules []AddHTTPRuleStruct `json:"http_rules" validate:"http_rules"`
TCPRules []AddTCPRuleStruct `json:"tcp_rules" validate:"tcp_rules"`
K8sComponentName string `json:"k8s_component_name" validate:"k8s_component_name"`
+ JobStrategy string `json:"job_strategy" validate:"job_strategy"`
}
// Endpoints holds third-party service endpoints or configuraion to get endpoints.
@@ -449,6 +450,55 @@ type DeleteServicePort struct {
Port int `json:"port"`
}
+//AddHandleResource -
+type AddHandleResource struct {
+ Namespace string `json:"namespace"`
+ AppID string `json:"app_id"`
+ ResourceYaml string `json:"resource_yaml"`
+}
+
+//HandleResource -
+type HandleResource struct {
+ Name string `json:"name"`
+ AppID string `json:"app_id"`
+ Kind string `json:"kind"`
+ Namespace string `json:"namespace"`
+ ResourceYaml string `json:"resource_yaml"`
+}
+
+// SyncResources -
+type SyncResources struct {
+ K8sResources []HandleResource `json:"k8s_resources"`
+}
+
+//YamlResource -
+type YamlResource struct {
+ EventID string `json:"event_id"`
+ AppID string `json:"region_app_id"`
+ TenantID string `json:"tenant_id"`
+ Namespace string `json:"namespace"`
+}
+
+const (
+ //CreateSuccess -
+ CreateSuccess = 1
+ //UpdateSuccess -
+ UpdateSuccess = 2
+ //CreateError -
+ CreateError = 3
+ //UpdateError -
+ UpdateError = 4
+)
+
+// JobStrategy -
+type JobStrategy struct {
+ Schedule string `json:"schedule"`
+ BackoffLimit string `json:"backoff_limit"`
+ Parallelism string `json:"parallelism"`
+ ActiveDeadlineSeconds string `json:"active_deadline_seconds"`
+ Completions string `json:"completions"`
+}
+
//TenantResources TenantResources
// swagger:parameters tenantResources
type TenantResources struct {
@@ -980,7 +1030,7 @@ type ServiceCheckStruct struct {
//检测来源类型
// in: body
// required: true
- SourceType string `json:"source_type" validate:"source_type|required|in:docker-run,docker-compose,sourcecode,third-party-service"`
+ SourceType string `json:"source_type" validate:"source_type|required|in:docker-run,docker-compose,sourcecode,third-party-service,package_build"`
CheckOS string `json:"check_os"`
// 检测来源定义,
diff --git a/api/model/namespace_resource.go b/api/model/namespace_resource.go
new file mode 100644
index 000000000..66f870ed1
--- /dev/null
+++ b/api/model/namespace_resource.go
@@ -0,0 +1,32 @@
+package model
+
+const (
+ //Deployment -
+ Deployment = "Deployment"
+ //Job -
+ Job = "Job"
+ //CronJob -
+ CronJob = "CronJob"
+ //StateFulSet -
+ StateFulSet = "StatefulSet"
+ //Service -
+ Service = "Service"
+ //PVC -
+ PVC = "PersistentVolumeClaim"
+ //Ingress -
+ Ingress = "Ingress"
+ //NetworkPolicy -
+ NetworkPolicy = "NetworkPolicy"
+ //ConfigMap -
+ ConfigMap = "ConfigMap"
+ //Secret -
+ Secret = "Secret"
+ //ServiceAccount -
+ ServiceAccount = "ServiceAccount"
+ //RoleBinding -
+ RoleBinding = "RoleBinding"
+ //HorizontalPodAutoscaler -
+ HorizontalPodAutoscaler = "HorizontalPodAutoscaler"
+ //Role -
+ Role = "Role"
+)
diff --git a/api/model/resource_handle.go b/api/model/resource_handle.go
new file mode 100644
index 000000000..477211c1d
--- /dev/null
+++ b/api/model/resource_handle.go
@@ -0,0 +1,90 @@
+package model
+
+import (
+ autoscalingv1 "k8s.io/api/autoscaling/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/dynamic"
+)
+
+//LabelResource -
+type LabelResource struct {
+ Workloads WorkLoadsResource `json:"workloads,omitempty"`
+ Others OtherResource `json:"others,omitempty"`
+ UnSupport map[string][]string `json:"un_support"`
+ Status string `json:"status"`
+}
+
+//LabelWorkloadsResourceProcess -
+type LabelWorkloadsResourceProcess struct {
+ Deployments map[string][]string `json:"deployments,omitempty"`
+ Jobs map[string][]string `json:"jobs,omitempty"`
+ CronJobs map[string][]string `json:"cronJobs,omitempty"`
+ StateFulSets map[string][]string `json:"stateFulSets,omitempty"`
+}
+
+//LabelOthersResourceProcess -
+type LabelOthersResourceProcess struct {
+ Services map[string][]string `json:"services,omitempty"`
+ PVC map[string][]string `json:"PVC,omitempty"`
+ Ingresses map[string][]string `json:"ingresses,omitempty"`
+ NetworkPolicies map[string][]string `json:"networkPolicies,omitempty"`
+ ConfigMaps map[string][]string `json:"configMaps,omitempty"`
+ Secrets map[string][]string `json:"secrets,omitempty"`
+ ServiceAccounts map[string][]string `json:"serviceAccounts,omitempty"`
+ RoleBindings map[string][]string `json:"roleBindings,omitempty"`
+ HorizontalPodAutoscalers map[string][]string `json:"horizontalPodAutoscalers,omitempty"`
+ Roles map[string][]string `json:"roles,omitempty"`
+}
+
+//YamlResourceParameter -
+type YamlResourceParameter struct {
+ ComponentsCR *[]ConvertResource
+ Basic BasicManagement
+ Template corev1.PodTemplateSpec
+ Namespace string
+ Name string
+ RsLabel map[string]string
+ CMs []corev1.ConfigMap
+ HPAs []autoscalingv1.HorizontalPodAutoscaler
+}
+
+//K8sResourceObject -
+type K8sResourceObject struct {
+ FileName string
+ BuildResources []BuildResource
+ Error string
+}
+
+//WorkLoadsResource -
+type WorkLoadsResource struct {
+ Deployments []string `json:"deployments,omitempty"`
+ Jobs []string `json:"jobs,omitempty"`
+ CronJobs []string `json:"cron_jobs,omitempty"`
+ StateFulSets []string `json:"state_ful_sets,omitempty"`
+}
+
+//BuildResource -
+type BuildResource struct {
+ Resource *unstructured.Unstructured
+ State int
+ ErrorOverview string
+ Dri dynamic.ResourceInterface
+ DC dynamic.Interface
+ GVK *schema.GroupVersionKind
+}
+
+//OtherResource -
+type OtherResource struct {
+ Services []string `json:"services,omitempty"`
+ PVC []string `json:"pvc,omitempty"`
+ Ingresses []string `json:"ingresses,omitempty"`
+ NetworkPolicies []string `json:"network_policies,omitempty"`
+ ConfigMaps []string `json:"config_maps,omitempty"`
+ Secrets []string `json:"secrets,omitempty"`
+ ServiceAccounts []string `json:"service_accounts,omitempty"`
+ RoleBindings []string `json:"role_bindings,omitempty"`
+ HorizontalPodAutoscalers []string `json:"horizontal_pod_autoscalers,omitempty"`
+ Roles []string `json:"roles,omitempty"`
+}
diff --git a/api/server/api.go b/api/server/api.go
index b6e7af53c..0b46174a7 100644
--- a/api/server/api.go
+++ b/api/server/api.go
@@ -173,6 +173,7 @@ func (m *Manager) Run() {
websocketRouter.Mount("/", websocket.Routes())
websocketRouter.Mount("/logs", websocket.LogRoutes())
websocketRouter.Mount("/app", websocket.AppRoutes())
+ websocketRouter.Mount("/package_build", websocket.PackageBuildRoutes())
if m.conf.WebsocketSSL {
logrus.Infof("websocket listen on (HTTPs) %s", m.conf.WebsocketAddr)
logrus.Fatal(http.ListenAndServeTLS(m.conf.WebsocketAddr, m.conf.WebsocketCertFile, m.conf.WebsocketKeyFile, websocketRouter))
diff --git a/builder/build/code_build.go b/builder/build/code_build.go
index 756834c58..adbdd0740 100644
--- a/builder/build/code_build.go
+++ b/builder/build/code_build.go
@@ -314,6 +314,13 @@ func (s *slugBuild) createVolumeAndMount(re *Request, sourceTarFileName string)
})
}
}
+ if re.ServerType == "pkg" {
+ volumeMounts = append(volumeMounts, corev1.VolumeMount{
+ Name: "slug",
+ MountPath: "/tmp/app",
+ SubPath: strings.TrimPrefix(re.RepositoryURL, "/grdata/"),
+ })
+ }
return volumes, volumeMounts
}
@@ -322,7 +329,7 @@ func (s *slugBuild) runBuildJob(re *Request) error {
re.Logger.Info(util.Translation("Start make code package"), map[string]string{"step": "build-exector"})
start := time.Now()
var sourceTarFileName string
- if re.ServerType != "oss" {
+ if re.ServerType != "oss" && re.ServerType != "pkg" {
var err error
sourceTarFileName, err = s.getSourceCodeTarFile(re)
if err != nil {
diff --git a/builder/exector/build_from_sourcecode_run.go b/builder/exector/build_from_sourcecode_run.go
index e0bc6e857..3821e2c69 100644
--- a/builder/exector/build_from_sourcecode_run.go
+++ b/builder/exector/build_from_sourcecode_run.go
@@ -21,6 +21,8 @@ package exector
import (
"context"
"fmt"
+ "github.com/goodrain/rainbond/builder/parser"
+ "io/ioutil"
"os"
"path"
"strings"
@@ -31,7 +33,6 @@ import (
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/build"
- "github.com/goodrain/rainbond/builder/parser"
"github.com/goodrain/rainbond/builder/parser/code"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/db"
@@ -95,6 +96,7 @@ func NewSouceCodeBuildItem(in []byte) *SourceCodeBuildItem {
Password: gjson.GetBytes(in, "password").String(),
TenantID: gjson.GetBytes(in, "tenant_id").String(),
ServiceID: gjson.GetBytes(in, "service_id").String(),
+ Configs: gjson.GetBytes(in, "configs").Map(),
}
envs := gjson.GetBytes(in, "envs").String()
be := make(map[string]string)
@@ -163,6 +165,69 @@ func (i *SourceCodeBuildItem) Run(timeout time.Duration) error {
}
case "oss":
i.commit = Commit{}
+ case "pkg":
+ var filePath string
+ pathSplit := strings.Split(i.CodeSouceInfo.RepositoryURL, "/")
+ eventID := pathSplit[len(pathSplit)-1]
+ // 存放目录
+ tarPath := fmt.Sprintf("/grdata/package_build/components/%s/events", i.ServiceID)
+ // 临时目录
+ oldPath := fmt.Sprintf("/grdata/package_build/temp/events/%s", eventID)
+ // 快速复制原目录
+ copyPath := i.CodeSouceInfo.Configs[i.ServiceID]
+ filePath = fmt.Sprintf("%s/%s", tarPath, eventID)
+
+ if copyPath.Str == "" {
+ files, err := ioutil.ReadDir(filePath)
+ if err != nil {
+ logrus.Errorf("read dir error: %s", err.Error())
+ return err // NOTE(review): on the first build this events dir may not exist yet; packageFunc in source_code.go falls back to the temp dir instead — confirm this shouldn't do the same
+ }
+ if len(files) == 0 {
+ filePath = oldPath
+ }
+ } else {
+ // 快速复制
+ splitCopyPath := strings.Split(copyPath.Str, "/")
+ splitRes := splitCopyPath[0 : len(splitCopyPath)-1]
+ modelPath := strings.Join(splitRes, "/")
+ tarCopyPath := fmt.Sprintf("/grdata/package_build/components/%s", i.ServiceID)
+ err := os.MkdirAll(tarCopyPath, 0755)
+ if err != nil {
+ return err
+ }
+ if err := util.CopyDir(modelPath, tarCopyPath); err != nil {
+ logrus.Errorf("copy dir error: %s", err.Error())
+ }
+ filePath = copyPath.Str
+ }
+ packages, err := ioutil.ReadDir(filePath)
+ if err != nil {
+ logrus.Errorf("read dir error: %s", err.Error())
+ return err
+ }
+ packageArr := make([]string, 0, 10)
+ for _, dir := range packages {
+ if dir.IsDir() {
+ continue
+ }
+ packageArr = append(packageArr, dir.Name())
+ }
+ if len(packageArr) != 0 {
+ fileName := packageArr[0]
+ file := filePath + "/" + fileName
+ fileMD5 := util.MD5(file)
+ i.commit = Commit{
+ Message: fileName,
+ Hash: fileMD5,
+ }
+ }
+ if copyPath.Str == "" {
+ if err = util.MoveDir(oldPath, tarPath); err != nil {
+ logrus.Errorf("move dir error: %s", err.Error())
+ }
+ }
+
default:
//default git
rs, err := sources.GitCloneOrPull(i.CodeSouceInfo, rbi.GetCodeHome(), i.Logger, 5)
@@ -186,8 +251,10 @@ func (i *SourceCodeBuildItem) Run(timeout time.Duration) error {
}
// clean cache code
defer func() {
- if err := os.RemoveAll(rbi.GetCodeHome()); err != nil {
- logrus.Warningf("remove source code: %v", err)
+ if i.CodeSouceInfo.ServerType != "pkg" {
+ if err := os.RemoveAll(rbi.GetCodeHome()); err != nil {
+ logrus.Warningf("remove source code: %v", err)
+ }
}
}()
diff --git a/builder/exector/service_check.go b/builder/exector/service_check.go
index c392e7005..db94bf1ef 100644
--- a/builder/exector/service_check.go
+++ b/builder/exector/service_check.go
@@ -115,10 +115,12 @@ func (e *exectorManager) serviceCheck(task *pb.TaskMessage) {
yamlbody = string(yamlbyte)
}
pr = parser.CreateDockerComposeParse(yamlbody, e.DockerClient, input.Username, input.Password, logger)
- case "sourcecode":
+ case "sourcecode":
pr = parser.CreateSourceCodeParse(input.SourceBody, logger)
case "third-party-service":
pr = parser.CreateThirdPartyServiceParse(input.SourceBody, logger)
+ case "package_build":
+ pr = parser.CreateSourceCodeParse(input.SourceBody, logger)
}
if pr == nil {
logger.Error("Creating component source types is not supported", map[string]string{"step": "callback", "status": "failure"})
diff --git a/builder/parser/code/specification.go b/builder/parser/code/specification.go
index 3c7b5e7ce..1ce4fd5b6 100644
--- a/builder/parser/code/specification.go
+++ b/builder/parser/code/specification.go
@@ -51,8 +51,15 @@ func init() {
//CheckCodeSpecification 检查语言规范
func CheckCodeSpecification(buildPath string, lang Lang, serverType string) Specification {
- if serverType == "oss" && lang == JavaJar {
- return common()
+ switch serverType {
+ case "oss":
+ if lang == JavaJar {
+ return common()
+ }
+ case "pkg":
+ if lang == JavaJar || lang == JaveWar {
+ return common()
+ }
}
if check, ok := specification[lang]; ok {
return check(buildPath)
diff --git a/builder/parser/source_code.go b/builder/parser/source_code.go
index 3eef132e5..b51d226a7 100644
--- a/builder/parser/source_code.go
+++ b/builder/parser/source_code.go
@@ -22,6 +22,7 @@ import (
"context"
"encoding/base64"
"fmt"
+ "io/ioutil"
"os"
"path"
"runtime"
@@ -110,9 +111,11 @@ func (d *SourceCodeParse) Parse() ParseErrorList {
}
// The source code is useless after the test is completed, and needs to be deleted.
defer func() {
- if sources.CheckFileExist(buildInfo.GetCodeHome()) {
- if err := sources.RemoveDir(buildInfo.GetCodeHome()); err != nil {
- logrus.Warningf("remove source code: %v", err)
+ if csi.ServerType != "pkg" {
+ if sources.CheckFileExist(buildInfo.GetCodeHome()) {
+ if err := sources.RemoveDir(buildInfo.GetCodeHome()); err != nil {
+ logrus.Warningf("remove source code: %v", err)
+ }
}
}
}()
@@ -203,6 +206,22 @@ func (d *SourceCodeParse) Parse() ParseErrorList {
d.branchs = rs.Branchs
return nil
}
+ packageFunc := func() ParseErrorList {
+ var checkPath string
+ checkPath = buildInfo.RepostoryURL
+ pathSplit := strings.Split(buildInfo.RepostoryURL, "/")
+ eventID := pathSplit[len(pathSplit)-1]
+ files, err := ioutil.ReadDir(checkPath)
+ if err != nil {
+ logrus.Warn("check package error", err)
+ }
+ if len(files) == 0 {
+ // 第一次上传在临时目录下检测
+ checkPath = fmt.Sprintf("/grdata/package_build/temp/events/%s", eventID)
+ }
+ buildInfo.CodeHome = checkPath
+ return ParseErrorList{}
+ }
ossFunc := func() ParseErrorList {
g := got.NewWithContext(context.Background())
util.CheckAndCreateDir(buildInfo.GetCodeHome())
@@ -260,6 +279,10 @@ func (d *SourceCodeParse) Parse() ParseErrorList {
if err := ossFunc(); err != nil && err.IsFatalError() {
return err
}
+ case "pkg":
+ if err := packageFunc(); err != nil && err.IsFatalError() {
+ return err
+ }
default:
//default git
logrus.Warningf("do not get void server type,default use git")
diff --git a/builder/sources/git.go b/builder/sources/git.go
index ef1b8719d..4f30f71ce 100644
--- a/builder/sources/git.go
+++ b/builder/sources/git.go
@@ -21,6 +21,7 @@ package sources
import (
"context"
"fmt"
+ "github.com/tidwall/gjson"
"io/ioutil"
"net/http"
"net/url"
@@ -53,11 +54,12 @@ import (
//CodeSourceInfo 代码源信息
type CodeSourceInfo struct {
- ServerType string `json:"server_type"`
- RepositoryURL string `json:"repository_url"`
- Branch string `json:"branch"`
- User string `json:"user"`
- Password string `json:"password"`
+ ServerType string `json:"server_type"`
+ RepositoryURL string `json:"repository_url"`
+ Branch string `json:"branch"`
+ User string `json:"user"`
+ Password string `json:"password"`
+ Configs map[string]gjson.Result `json:"configs"`
//避免项目之间冲突,代码缓存目录提高到租户
TenantID string `json:"tenant_id"`
ServiceID string `json:"service_id"`
@@ -428,12 +430,11 @@ func GetPrivateFile(tenantID string) string {
}
if ok, _ := util.FileExists(path.Join(home, "/.ssh/"+tenantID)); ok {
return path.Join(home, "/.ssh/"+tenantID)
- } else {
- if ok, _ := util.FileExists(path.Join(home, "/.ssh/builder_rsa")); ok {
- return path.Join(home, "/.ssh/builder_rsa")
- }
- return path.Join(home, "/.ssh/id_rsa")
}
+ if ok, _ := util.FileExists(path.Join(home, "/.ssh/builder_rsa")); ok {
+ return path.Join(home, "/.ssh/builder_rsa")
+ }
+ return path.Join(home, "/.ssh/id_rsa")
}
diff --git a/builder/sources/repo.go b/builder/sources/repo.go
index 1f227b8f6..77f362f9d 100644
--- a/builder/sources/repo.go
+++ b/builder/sources/repo.go
@@ -92,6 +92,13 @@ func (r *RepostoryBuildInfo) GetProtocol() string {
//CreateRepostoryBuildInfo 创建源码编译信息
//repoType git or svn
func CreateRepostoryBuildInfo(repoURL, repoType, branch, tenantID string, ServiceID string) (*RepostoryBuildInfo, error) {
+ if repoType == "pkg" {
+ return &RepostoryBuildInfo{
+ RepostoryURL: repoURL,
+ RepostoryURLType: repoType,
+ CodeHome: repoURL,
+ }, nil
+ }
// repoURL= github.com/goodrain/xxx.git?dir=home
ep, err := transport.NewEndpoint(repoURL)
if err != nil {
diff --git a/cmd/api/server/server.go b/cmd/api/server/server.go
index 88566e1cf..bb0ed30ed 100644
--- a/cmd/api/server/server.go
+++ b/cmd/api/server/server.go
@@ -20,12 +20,11 @@ package server
import (
"context"
+ "k8s.io/client-go/restmapper"
"os"
"os/signal"
"syscall"
-
-
- rainbondscheme "github.com/goodrain/rainbond/pkg/generated/clientset/versioned/scheme"
+
"github.com/goodrain/rainbond/api/controller"
"github.com/goodrain/rainbond/api/db"
"github.com/goodrain/rainbond/api/discover"
@@ -34,6 +33,7 @@ import (
"github.com/goodrain/rainbond/cmd/api/option"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/pkg/generated/clientset/versioned"
+ rainbondscheme "github.com/goodrain/rainbond/pkg/generated/clientset/versioned/scheme"
etcdutil "github.com/goodrain/rainbond/util/etcd"
k8sutil "github.com/goodrain/rainbond/util/k8s"
"github.com/goodrain/rainbond/worker/client"
@@ -79,6 +79,7 @@ func Run(s *option.APIServer) error {
if err != nil {
return err
}
+
rainbondClient := versioned.NewForConfigOrDie(config)
// k8s runtime client
@@ -91,6 +92,12 @@ func Run(s *option.APIServer) error {
if err != nil {
return errors.WithMessage(err, "create k8s client")
}
+ // rest mapper
+ gr, err := restmapper.GetAPIGroupResources(clientset)
+ if err != nil {
+ return err
+ }
+ mapper := restmapper.NewDiscoveryRESTMapper(gr)
if err := event.NewManager(event.EventConfig{
EventLogServers: s.Config.EventLogServers,
@@ -121,7 +128,7 @@ func Run(s *option.APIServer) error {
//初始化 middleware
handler.InitProxy(s.Config)
//创建handle
- if err := handler.InitHandle(s.Config, etcdClientArgs, cli, etcdcli, clientset, rainbondClient, k8sClient); err != nil {
+ if err := handler.InitHandle(s.Config, etcdClientArgs, cli, etcdcli, clientset, rainbondClient, k8sClient, config, mapper); err != nil {
logrus.Errorf("init all handle error, %v", err)
return err
}
diff --git a/db/dao/dao.go b/db/dao/dao.go
index 4cdcc1745..ca4ad5280 100644
--- a/db/dao/dao.go
+++ b/db/dao/dao.go
@@ -77,6 +77,7 @@ type ApplicationDao interface {
GetByServiceID(sid string) (*model.Application, error)
ListByAppIDs(appIDs []string) ([]*model.Application, error)
IsK8sAppDuplicate(tenantID, AppID, k8sApp string) bool
+ GetAppByName(tenantID, k8sAppName string) (*model.Application, error)
}
//AppConfigGroupDao Application config group Dao
@@ -621,3 +622,21 @@ type TenantServiceMonitorDao interface {
DeleteByComponentIDs(componentIDs []string) error
CreateOrUpdateMonitorInBatch(monitors []*model.TenantServiceMonitor) error
}
+
+// ComponentK8sAttributeDao -
+type ComponentK8sAttributeDao interface {
+ Dao
+ GetByComponentIDAndName(componentID, name string) (*model.ComponentK8sAttributes, error)
+ CreateOrUpdateAttributesInBatch(attributes []*model.ComponentK8sAttributes) error
+ DeleteByComponentIDAndName(componentID, name string) error
+ DeleteByComponentIDs(componentIDs []string) error
+}
+
+// K8sResourceDao -
+type K8sResourceDao interface {
+ Dao
+ ListByAppID(appID string) ([]model.K8sResource, error)
+ CreateK8sResourceInBatch(k8sResources []*model.K8sResource) error
+ DeleteK8sResourceInBatch(appID, name string, kind string) error
+ GetK8sResourceByNameInBatch(appID, name, kind string) ([]model.K8sResource, error)
+}
diff --git a/db/db.go b/db/db.go
index 608877fa0..45089c150 100644
--- a/db/db.go
+++ b/db/db.go
@@ -47,6 +47,8 @@ type Manager interface {
AppConfigGroupServiceDaoTransactions(db *gorm.DB) dao.AppConfigGroupServiceDao
AppConfigGroupItemDao() dao.AppConfigGroupItemDao
AppConfigGroupItemDaoTransactions(db *gorm.DB) dao.AppConfigGroupItemDao
+ K8sResourceDao() dao.K8sResourceDao
+ K8sResourceDaoTransactions(db *gorm.DB) dao.K8sResourceDao
EnterpriseDao() dao.EnterpriseDao
TenantDao() dao.TenantDao
TenantDaoTransactions(db *gorm.DB) dao.TenantDao
@@ -137,6 +139,9 @@ type Manager interface {
TenantServiceMonitorDao() dao.TenantServiceMonitorDao
TenantServiceMonitorDaoTransactions(db *gorm.DB) dao.TenantServiceMonitorDao
+
+ ComponentK8sAttributeDao() dao.ComponentK8sAttributeDao
+ ComponentK8sAttributeDaoTransactions(db *gorm.DB) dao.ComponentK8sAttributeDao
}
var defaultManager Manager
diff --git a/db/model/application.go b/db/model/application.go
index 83cc82a7e..ef39ddf8d 100644
--- a/db/model/application.go
+++ b/db/model/application.go
@@ -1,3 +1,21 @@
+// RAINBOND, Application Management Platform
+// Copyright (C) 2020-2022 Goodrain Co., Ltd.
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version. For any non-GPL usage of Rainbond,
+// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
+// must be obtained first.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+ // along with this program. If not, see <http://www.gnu.org/licenses/>.
+
package model
const (
@@ -73,7 +91,27 @@ type ApplicationConfigGroup struct {
Enable bool `gorm:"column:enable" json:"enable"`
}
-// TableName return tableName "application"
+// TableName return tableName "app_config_group"
func (t *ApplicationConfigGroup) TableName() string {
return "app_config_group"
}
+
+// K8sResource Save k8s resources under the application
+type K8sResource struct {
+ Model
+ AppID string `gorm:"column:app_id" json:"app_id"`
+ Name string `gorm:"column:name" json:"name"`
+ // The resource kind is the same as that in k8s cluster
+ Kind string `gorm:"column:kind" json:"kind"`
+ // Yaml file for the storage resource
+ Content string `gorm:"column:content;type:longtext" json:"content"`
+ // resource create error overview
+ ErrorOverview string `gorm:"column:status;type:longtext" json:"error_overview"` // NOTE(review): column names "status" (here) and "success" (State) don't match the field names — confirm intended before this new table ships
+ //whether it was created successfully
+ State int `gorm:"column:success;type:int" json:"state"`
+}
+
+// TableName return tableName "k8s_resources"
+func (k *K8sResource) TableName() string {
+ return "k8s_resources"
+}
diff --git a/db/model/component.go b/db/model/component.go
new file mode 100644
index 000000000..1c924f1b3
--- /dev/null
+++ b/db/model/component.go
@@ -0,0 +1,64 @@
+// RAINBOND, Application Management Platform
+// Copyright (C) 2022-2022 Goodrain Co., Ltd.
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version. For any non-GPL usage of Rainbond,
+// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
+// must be obtained first.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+package model
+
+const (
+ //K8sAttributeNameNodeSelector -
+ K8sAttributeNameNodeSelector = "nodeSelector"
+ //K8sAttributeNameLabels -
+ K8sAttributeNameLabels = "labels"
+ //K8sAttributeNameTolerations -
+ K8sAttributeNameTolerations = "tolerations"
+ //K8sAttributeNameVolumes -
+ K8sAttributeNameVolumes = "volumes"
+ //K8sAttributeNameServiceAccountName -
+ K8sAttributeNameServiceAccountName = "serviceAccountName"
+ //K8sAttributeNamePrivileged -
+ K8sAttributeNamePrivileged = "privileged"
+ //K8sAttributeNameAffinity -
+ K8sAttributeNameAffinity = "affinity"
+ //K8sAttributeNameVolumeMounts -
+ K8sAttributeNameVolumeMounts = "volumeMounts"
+ //K8sAttributeNameENV -
+ K8sAttributeNameENV = "env"
+)
+
+// ComponentK8sAttributes -
+type ComponentK8sAttributes struct {
+ Model
+ TenantID string `gorm:"column:tenant_id;size:32" validate:"tenant_id|between:30,33" json:"tenant_id"`
+ ComponentID string `gorm:"column:component_id" json:"component_id"`
+
+ // Name Define the attribute name, which is currently supported
+ // [nodeSelector/labels/tolerations/volumes/serviceAccountName/privileged/affinity/volumeMounts]
+ // The field name should be the same as that in the K8s resource yaml file.
+ Name string `gorm:"column:name" json:"name"`
+
+ // The field type defines how the attribute is stored. Currently, `json/yaml/string` are supported
+ SaveType string `gorm:"column:save_type" json:"save_type"`
+
+ // Define the attribute value, which is stored in the database.
+ // The value is stored in the database in the form of `json/yaml/string`.
+ AttributeValue string `gorm:"column:attribute_value;type:longtext" json:"attribute_value"`
+}
+
+// TableName 表名
+func (t *ComponentK8sAttributes) TableName() string {
+ return "component_k8s_attributes"
+}
diff --git a/db/model/tenant.go b/db/model/tenant.go
index c7c4ef797..66c7abb18 100644
--- a/db/model/tenant.go
+++ b/db/model/tenant.go
@@ -107,6 +107,22 @@ func (s ServiceType) IsState() bool {
return false
}
+// IsJob reports whether the service type is "job".
+func (s ServiceType) IsJob() bool {
+	// Idiom: return the comparison directly instead of if/return-true/return-false.
+	return s == ServiceTypeJob
+}
+
+// IsCronJob reports whether the service type is "cronjob".
+func (s ServiceType) IsCronJob() bool {
+	// Idiom: return the comparison directly instead of if/return-true/return-false.
+	return s == ServiceTypeCronJob
+}
+
// IsSingleton is singleton or not
func (s ServiceType) IsSingleton() bool {
if s == "" {
@@ -127,6 +143,22 @@ func (t *TenantServices) IsState() bool {
return ServiceType(t.ExtendMethod).IsState()
}
+// IsJob reports whether this service deploys as a Kubernetes Job
+// (based on its extend method).
+func (t *TenantServices) IsJob() bool {
+	// Idiom: delegate directly; the if/return-true/return-false wrapper was redundant.
+	return ServiceType(t.ExtendMethod).IsJob()
+}
+
+// IsCronJob reports whether this service deploys as a Kubernetes CronJob
+// (based on its extend method).
+func (t *TenantServices) IsCronJob() bool {
+	// Idiom: delegate directly; the if/return-true/return-false wrapper was redundant.
+	return ServiceType(t.ExtendMethod).IsCronJob()
+}
+
// IsSingleton is singleton or multiple service
func (t *TenantServices) IsSingleton() bool {
if t.ExtendMethod == "" {
@@ -150,6 +182,12 @@ var ServiceTypeStateSingleton ServiceType = "state_singleton"
// ServiceTypeStateMultiple state_multiple
var ServiceTypeStateMultiple ServiceType = "state_multiple"
+// ServiceTypeJob job
+var ServiceTypeJob ServiceType = "job"
+
+// ServiceTypeCronJob cronjob
+var ServiceTypeCronJob ServiceType = "cronjob"
+
//TenantServices app service base info
type TenantServices struct {
Model
@@ -203,6 +241,8 @@ type TenantServices struct {
AppID string `gorm:"column:app_id" json:"app_id"`
// Component name in cluster
K8sComponentName string `gorm:"column:k8s_component_name" json:"k8s_component_name"`
+ // Job任务策略
+ JobStrategy string `gorm:"column:job_strategy" json:"job_strategy"`
}
// ComponentWorkload -
@@ -324,6 +364,8 @@ type TenantServicesDelete struct {
AppID string `gorm:"column:app_id" json:"app_id"`
// Component name in cluster
K8sComponentName string `gorm:"column:k8s_component_name" json:"k8s_component_name"`
+ // Job任务策略
+ JobStrategy string `gorm:"column:job_strategy" json:"job_strategy"`
}
//TableName 表名
diff --git a/db/mysql/dao/application.go b/db/mysql/dao/application.go
index 2ae77d772..d0c29fb9d 100644
--- a/db/mysql/dao/application.go
+++ b/db/mysql/dao/application.go
@@ -105,3 +105,15 @@ func (a *ApplicationDaoImpl) IsK8sAppDuplicate(tenantID, AppID, k8sApp string) b
}
return count > 0
}
+
+//GetAppByName returns the application in the tenant whose k8s_app name
+//matches k8sAppName. A missing row is translated into the API-level
+//bcode.ErrApplicationNotFound; any other DB error is returned as-is.
+func (a *ApplicationDaoImpl) GetAppByName(tenantID, k8sAppName string) (*model.Application, error) {
+ var app model.Application
+ if err := a.DB.Where("tenant_id=? and k8s_app=?", tenantID, k8sAppName).Find(&app).Error; err != nil {
+  if err == gorm.ErrRecordNotFound {
+   return nil, bcode.ErrApplicationNotFound
+  }
+  return nil, err
+ }
+ return &app, nil
+}
diff --git a/db/mysql/dao/k8s_resource.go b/db/mysql/dao/k8s_resource.go
new file mode 100644
index 000000000..62e1b10a3
--- /dev/null
+++ b/db/mysql/dao/k8s_resource.go
@@ -0,0 +1,81 @@
+// RAINBOND, Application Management Platform
+// Copyright (C) 2022-2022 Goodrain Co., Ltd.
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version. For any non-GPL usage of Rainbond,
+// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
+// must be obtained first.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see .
+
+package dao
+
+import (
+ "fmt"
+ gormbulkups "github.com/atcdot/gorm-bulk-upsert"
+ "github.com/goodrain/rainbond/db/model"
+ "github.com/jinzhu/gorm"
+ pkgerr "github.com/pkg/errors"
+)
+
+// K8sResourceDaoImpl k8s resource dao backed by gorm.
+type K8sResourceDaoImpl struct {
+ DB *gorm.DB
+}
+
+// AddModel add model.
+// NOTE(review): intentionally a no-op? Rows appear to be created only via
+// CreateK8sResourceInBatch below — confirm single-row insert is never needed.
+func (t *K8sResourceDaoImpl) AddModel(mo model.Interface) error {
+ return nil
+}
+
+// UpdateModel update model. Saves the full row for the given *model.K8sResource;
+// any other concrete type is rejected.
+func (t *K8sResourceDaoImpl) UpdateModel(mo model.Interface) error {
+ resource, ok := mo.(*model.K8sResource)
+ if !ok {
+  return fmt.Errorf("mo.(*model.K8sResource) err")
+ }
+ return t.DB.Save(resource).Error
+}
+
+// ListByAppID lists all k8s resources stored for one application.
+func (t *K8sResourceDaoImpl) ListByAppID(appID string) ([]model.K8sResource, error) {
+ var resources []model.K8sResource
+ if err := t.DB.Where("app_id = ?", appID).Find(&resources).Error; err != nil {
+  return nil, err
+ }
+ return resources, nil
+}
+
+//CreateK8sResourceInBatch bulk-upserts resources (batch size 2000).
+func (t *K8sResourceDaoImpl) CreateK8sResourceInBatch(k8sResources []*model.K8sResource) error {
+ var objects []interface{}
+ for _, cg := range k8sResources {
+  objects = append(objects, *cg)
+ }
+ if err := gormbulkups.BulkUpsert(t.DB, objects, 2000); err != nil {
+  return pkgerr.Wrap(err, "create K8sResource groups in batch")
+ }
+ return nil
+}
+
+//DeleteK8sResourceInBatch deletes all rows matching (appID, name, kind).
+func (t *K8sResourceDaoImpl) DeleteK8sResourceInBatch(appID, name string, kind string) error {
+ return t.DB.Where("app_id=? and name=? and kind=?", appID, name, kind).Delete(&model.K8sResource{}).Error
+}
+
+//GetK8sResourceByNameInBatch returns all rows matching (appID, name, kind).
+func (t *K8sResourceDaoImpl) GetK8sResourceByNameInBatch(appID, name, kind string) ([]model.K8sResource, error) {
+ var resources []model.K8sResource
+ if err := t.DB.Where("app_id=? and name=? and kind=?", appID, name, kind).Find(&resources).Error; err != nil {
+  return nil, err
+ }
+ return resources, nil
+}
diff --git a/db/mysql/dao/tenants.go b/db/mysql/dao/tenants.go
index 5c6970271..801db0c43 100644
--- a/db/mysql/dao/tenants.go
+++ b/db/mysql/dao/tenants.go
@@ -2019,3 +2019,55 @@ func (t *TenantServiceScalingRecordsDaoImpl) CountByServiceID(serviceID string)
return count, nil
}
+
+// ComponentK8sAttributeDaoImpl The K8s attribute value of the component
+type ComponentK8sAttributeDaoImpl struct {
+ DB *gorm.DB
+}
+
+// AddModel inserts a new attribute; returns ErrRecordAlreadyExist when a row
+// with the same (component_id, name) is already present.
+// NOTE(review): check-then-insert is not atomic — two concurrent AddModel calls
+// can both pass the RecordNotFound check; rely on a unique index if one exists.
+func (t *ComponentK8sAttributeDaoImpl) AddModel(mo model.Interface) error {
+ attr := mo.(*model.ComponentK8sAttributes)
+ var old model.ComponentK8sAttributes
+ if ok := t.DB.Where("component_id=? and name=?", attr.ComponentID, attr.Name).Find(&old).RecordNotFound(); ok {
+  return t.DB.Create(attr).Error
+ }
+ return errors.ErrRecordAlreadyExist
+}
+
+// UpdateModel saves the full attribute row.
+func (t *ComponentK8sAttributeDaoImpl) UpdateModel(mo model.Interface) error {
+ attr := mo.(*model.ComponentK8sAttributes)
+ return t.DB.Save(attr).Error
+}
+
+// GetByComponentIDAndName returns the single attribute identified by
+// (componentID, name); gorm.ErrRecordNotFound when absent.
+func (t *ComponentK8sAttributeDaoImpl) GetByComponentIDAndName(componentID, name string) (*model.ComponentK8sAttributes, error) {
+ var record model.ComponentK8sAttributes
+ if err := t.DB.Where("component_id=? and name=?", componentID, name).Take(&record).Error; err != nil {
+  return nil, err
+ }
+ return &record, nil
+}
+
+// CreateOrUpdateAttributesInBatch Batch insert or update component attributes
+// (bulk upsert, batch size 2000).
+func (t *ComponentK8sAttributeDaoImpl) CreateOrUpdateAttributesInBatch(attributes []*model.ComponentK8sAttributes) error {
+ var objects []interface{}
+ for _, attribute := range attributes {
+  objects = append(objects, *attribute)
+ }
+ if err := gormbulkups.BulkUpsert(t.DB, objects, 2000); err != nil {
+  return pkgerr.Wrap(err, "create or update component attributes in batch")
+ }
+ return nil
+}
+
+// DeleteByComponentIDAndName delete by componentID and name
+func (t *ComponentK8sAttributeDaoImpl) DeleteByComponentIDAndName(componentID, name string) error {
+ return t.DB.Where("component_id=? and name=?", componentID, name).Delete(&model.ComponentK8sAttributes{}).Error
+}
+
+// DeleteByComponentIDs delete by componentIDs
+func (t *ComponentK8sAttributeDaoImpl) DeleteByComponentIDs(componentIDs []string) error {
+ return t.DB.Where("component_id in (?)", componentIDs).Delete(&model.ComponentK8sAttributes{}).Error
+}
diff --git a/db/mysql/dao_impl.go b/db/mysql/dao_impl.go
index 6e80aaa1e..92e80a8aa 100644
--- a/db/mysql/dao_impl.go
+++ b/db/mysql/dao_impl.go
@@ -647,3 +647,31 @@ func (m *Manager) TenantServiceMonitorDaoTransactions(db *gorm.DB) dao.TenantSer
DB: db,
}
}
+
+// ComponentK8sAttributeDao returns a ComponentK8sAttributeDao bound to the
+// manager's default DB connection.
+func (m *Manager) ComponentK8sAttributeDao() dao.ComponentK8sAttributeDao {
+ return &mysqldao.ComponentK8sAttributeDaoImpl{
+  DB: m.db,
+ }
+}
+
+// ComponentK8sAttributeDaoTransactions returns a ComponentK8sAttributeDao
+// bound to the given transaction handle.
+func (m *Manager) ComponentK8sAttributeDaoTransactions(db *gorm.DB) dao.ComponentK8sAttributeDao {
+ return &mysqldao.ComponentK8sAttributeDaoImpl{
+  DB: db,
+ }
+}
+
+// K8sResourceDao returns a K8sResourceDao bound to the manager's default
+// DB connection.
+func (m *Manager) K8sResourceDao() dao.K8sResourceDao {
+ return &mysqldao.K8sResourceDaoImpl{
+  DB: m.db,
+ }
+}
+
+// K8sResourceDaoTransactions returns a K8sResourceDao bound to the given
+// transaction handle.
+func (m *Manager) K8sResourceDaoTransactions(db *gorm.DB) dao.K8sResourceDao {
+ return &mysqldao.K8sResourceDaoImpl{
+  DB: db,
+ }
+}
diff --git a/db/mysql/mysql.go b/db/mysql/mysql.go
index 8907b79b9..a3fd9981a 100644
--- a/db/mysql/mysql.go
+++ b/db/mysql/mysql.go
@@ -155,6 +155,8 @@ func (m *Manager) RegisterTableModel() {
m.models = append(m.models, &model.TenantServiceAutoscalerRuleMetrics{})
m.models = append(m.models, &model.TenantServiceScalingRecords{})
m.models = append(m.models, &model.TenantServiceMonitor{})
+ m.models = append(m.models, &model.ComponentK8sAttributes{})
+ m.models = append(m.models, &model.K8sResource{})
}
//CheckTable check and create tables
diff --git a/util/copydir.go b/util/copydir.go
new file mode 100644
index 000000000..088e4eafe
--- /dev/null
+++ b/util/copydir.go
@@ -0,0 +1,107 @@
+package util
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "fmt"
+ "github.com/sirupsen/logrus"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+)
+
+// FormatPath normalizes path separators for the current operating system:
+// backslashes on Windows, forward slashes on darwin/linux. Any other
+// platform gets the path back unchanged.
+func FormatPath(s string) string {
+	log.Println("runtime.GOOS:", runtime.GOOS)
+	switch runtime.GOOS {
+	case "windows":
+		return strings.ReplaceAll(s, "/", "\\")
+	case "darwin", "linux":
+		return strings.ReplaceAll(s, "\\", "/")
+	default:
+		logrus.Info("only support linux,windows,darwin, but os is " + runtime.GOOS)
+		return s
+	}
+}
+
+// MoveDir copies src into dest using the platform copy tool (xcopy on
+// Windows, "cp -R" on darwin/linux) and then removes src.
+func MoveDir(src string, dest string) error {
+	src = FormatPath(src)
+	dest = FormatPath(dest)
+	logrus.Info("src", src)
+	logrus.Info("dest", dest)
+
+	var cmd *exec.Cmd
+	switch runtime.GOOS {
+	case "windows":
+		cmd = exec.Command("xcopy", src, dest, "/I", "/E")
+	case "darwin", "linux":
+		cmd = exec.Command("cp", "-R", src, dest)
+	default:
+		// Fix: without this guard cmd stays nil on any other platform and
+		// cmd.Output() below panics.
+		return fmt.Errorf("unsupported os %s", runtime.GOOS)
+	}
+	outPut, err := cmd.Output()
+	if err != nil {
+		logrus.Errorf("Output error: %s", err.Error())
+		return err
+	}
+	// Fix: print the tool output as text, not as a raw byte-slice value.
+	fmt.Println(string(outPut))
+	if err := os.RemoveAll(src); err != nil {
+		logrus.Errorf("remove oldpath error: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// MD5 returns the hex-encoded MD5 digest of the file's contents, or ""
+// when the file cannot be opened or read.
+func MD5(file string) string {
+	f, err := os.Open(file)
+	if err != nil {
+		// Fix: the original continued with a nil handle and returned the
+		// digest of empty input as if it were the file's checksum.
+		logrus.Error(err)
+		return ""
+	}
+	defer f.Close()
+
+	h := md5.New()
+	if _, err = io.Copy(h, f); err != nil {
+		logrus.Error(err)
+		return ""
+	}
+	res := hex.EncodeToString(h.Sum(nil))
+	logrus.Info("md5:", res)
+	return res
+}
+
+// CopyDir copies src into dest (recursively) using the platform copy tool,
+// creating dest first when it does not exist. Unlike MoveDir, src is kept.
+func CopyDir(src string, dest string) error {
+	if _, err := os.Stat(dest); err != nil {
+		// Fix: the original tested !os.IsExist(err); the intent is
+		// "create dest when it does not exist yet".
+		if os.IsNotExist(err) {
+			if err := os.MkdirAll(dest, 0755); err != nil {
+				// Fix: propagate the failure instead of logging and
+				// continuing into a copy that cannot succeed.
+				logrus.Error("make and copy dir error", err)
+				return err
+			}
+		}
+	}
+	src = FormatPath(src)
+	dest = FormatPath(dest)
+	logrus.Info("src", src)
+	logrus.Info("dest", dest)
+
+	var cmd *exec.Cmd
+	switch runtime.GOOS {
+	case "windows":
+		cmd = exec.Command("xcopy", src, dest, "/I", "/E")
+	case "darwin", "linux":
+		cmd = exec.Command("cp", "-R", src, dest)
+	default:
+		// Fix: without this guard cmd stays nil on any other platform and
+		// cmd.Output() below panics.
+		return fmt.Errorf("unsupported os %s", runtime.GOOS)
+	}
+	outPut, err := cmd.Output()
+	if err != nil {
+		logrus.Errorf("Output error: %s", err.Error())
+		return err
+	}
+	// Fix: print the tool output as text, not as a raw byte-slice value.
+	fmt.Println(string(outPut))
+	return nil
+}
diff --git a/worker/appm/controller/start.go b/worker/appm/controller/start.go
index f70189533..cdbcee06d 100644
--- a/worker/appm/controller/start.go
+++ b/worker/appm/controller/start.go
@@ -130,7 +130,7 @@ func (s *startController) startOne(app v1.AppService) error {
return fmt.Errorf("create claims: %v", err)
}
}
- //step 2: create statefulset or deployment
+ //step 2: create statefulset or deployment or job or cronjob
if statefulset := app.GetStatefulSet(); statefulset != nil {
_, err = s.manager.client.AppsV1().StatefulSets(app.GetNamespace()).Create(s.ctx, statefulset, metav1.CreateOptions{})
if err != nil {
@@ -143,6 +143,18 @@ func (s *startController) startOne(app v1.AppService) error {
return fmt.Errorf("create deployment failure:%s;", err.Error())
}
}
+ if job := app.GetJob(); job != nil {
+ _, err = s.manager.client.BatchV1().Jobs(app.GetNamespace()).Create(s.ctx, job, metav1.CreateOptions{})
+ if err != nil {
+ return fmt.Errorf("create job failure:%s;", err.Error())
+ }
+ }
+ if cronjob := app.GetCronJob(); cronjob != nil {
+ _, err = s.manager.client.BatchV1beta1().CronJobs(app.GetNamespace()).Create(s.ctx, cronjob, metav1.CreateOptions{})
+ if err != nil {
+ return fmt.Errorf("create cronjob failure:%s;", err.Error())
+ }
+ }
//step 3: create services
if services := app.GetServices(true); services != nil {
if err := CreateKubeService(s.manager.client, app.GetNamespace(), services...); err != nil {
diff --git a/worker/appm/controller/stop.go b/worker/appm/controller/stop.go
index 9351e54b7..964373366 100644
--- a/worker/appm/controller/stop.go
+++ b/worker/appm/controller/stop.go
@@ -162,6 +162,21 @@ func (s *stopController) stopOne(app v1.AppService) error {
}
s.manager.store.OnDeletes(deployment)
}
+ if job := app.GetJob(); job != nil {
+ err := s.manager.client.BatchV1().Jobs(app.GetNamespace()).Delete(s.ctx, job.Name, metav1.DeleteOptions{})
+ if err != nil && !errors.IsNotFound(err) {
+ return fmt.Errorf("delete job failure:%s", err.Error())
+ }
+ s.manager.store.OnDeletes(job)
+ }
+ if cronjob := app.GetCronJob(); cronjob != nil {
+ propagationPolicy := metav1.DeletePropagationBackground
+ err := s.manager.client.BatchV1beta1().CronJobs(app.GetNamespace()).Delete(s.ctx, cronjob.Name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
+ if err != nil && !errors.IsNotFound(err) {
+ return fmt.Errorf("delete cronjob failure:%s", err.Error())
+ }
+ s.manager.store.OnDeletes(cronjob)
+ }
//step 6: delete all pod
var gracePeriodSeconds int64
if pods := app.GetPods(true); pods != nil {
diff --git a/worker/appm/conversion/service.go b/worker/appm/conversion/service.go
index e7afa796e..efd95781e 100644
--- a/worker/appm/conversion/service.go
+++ b/worker/appm/conversion/service.go
@@ -19,10 +19,17 @@
package conversion
import (
+ "encoding/json"
"fmt"
+ "strconv"
+ "strings"
+
+ apimodel "github.com/goodrain/rainbond/api/model"
+
"github.com/goodrain/rainbond/api/handler/app_governance_mode/adaptor"
"github.com/sirupsen/logrus"
- "strings"
+ batchv1 "k8s.io/api/batch/v1"
+ "k8s.io/api/batch/v1beta1"
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
@@ -130,6 +137,14 @@ func TenantServiceBase(as *v1.AppService, dbmanager db.Manager) error {
as.ContainerGPU = tenantService.ContainerGPU
as.ContainerMemory = tenantService.ContainerMemory
as.Replicas = tenantService.Replicas
+ if tenantService.IsJob() {
+ initBaseJob(as, tenantService)
+ return nil
+ }
+ if tenantService.IsCronJob() {
+ initBaseCronJob(as, tenantService)
+ return nil
+ }
if !tenantService.IsState() {
initBaseDeployment(as, tenantService)
return nil
@@ -216,6 +231,116 @@ func initBaseDeployment(as *v1.AppService, service *dbmodel.TenantServices) {
as.SetDeployment(deployment)
}
+// initBaseJob initializes the batchv1.Job workload for a component of type
+// "job" and stores it on the AppService. Job strategy fields (activeDeadline,
+// backoffLimit, parallelism, completions) come from the JSON-encoded
+// service.JobStrategy; unparsable values are skipped.
+func initBaseJob(as *v1.AppService, service *dbmodel.TenantServices) {
+	as.ServiceType = v1.TypeJob
+	job := as.GetJob()
+	if job == nil {
+		job = &batchv1.Job{}
+	}
+	job.Namespace = as.GetNamespace()
+	if job.Spec.Selector == nil {
+		job.Spec.Selector = &metav1.LabelSelector{}
+	}
+	job.Name = as.GetK8sWorkloadName()
+	job.GenerateName = strings.Replace(service.ServiceAlias, "_", "-", -1)
+	injectLabels := getInjectLabels(as)
+	job.Labels = as.GetCommonLabels(job.Labels, map[string]string{
+		"name":    service.ServiceAlias,
+		"version": service.DeployVersion,
+	}, injectLabels)
+
+	if service.JobStrategy != "" {
+		var js *apimodel.JobStrategy
+		if err := json.Unmarshal([]byte(service.JobStrategy), &js); err != nil || js == nil {
+			// Fix: the original dereferenced js after a failed Unmarshal,
+			// panicking on a malformed strategy. Fall back to Job defaults.
+			logrus.Error("job strategy json unmarshal error", err)
+		} else {
+			if js.ActiveDeadlineSeconds != "" {
+				if ads, err := strconv.ParseInt(js.ActiveDeadlineSeconds, 10, 64); err == nil {
+					job.Spec.ActiveDeadlineSeconds = &ads
+				}
+			}
+			if js.BackoffLimit != "" {
+				if res, err := strconv.ParseInt(js.BackoffLimit, 10, 32); err == nil {
+					bkl := int32(res)
+					job.Spec.BackoffLimit = &bkl
+				}
+			}
+			if js.Parallelism != "" {
+				if res, err := strconv.ParseInt(js.Parallelism, 10, 32); err == nil {
+					pll := int32(res)
+					job.Spec.Parallelism = &pll
+				}
+			}
+			if js.Completions != "" {
+				if res, err := strconv.ParseInt(js.Completions, 10, 32); err == nil {
+					cpt := int32(res)
+					job.Spec.Completions = &cpt
+				}
+			}
+		}
+	}
+	as.SetJob(job)
+}
+
+// initBaseCronJob initializes the v1beta1.CronJob workload for a component
+// of type "cronjob" and stores it on the AppService. The schedule and job
+// strategy fields come from the JSON-encoded service.JobStrategy;
+// unparsable values are skipped.
+func initBaseCronJob(as *v1.AppService, service *dbmodel.TenantServices) {
+	as.ServiceType = v1.TypeCronJob
+	cronJob := as.GetCronJob()
+	if cronJob == nil {
+		cronJob = &v1beta1.CronJob{}
+	}
+	injectLabels := getInjectLabels(as)
+	jobTemp := v1beta1.JobTemplateSpec{}
+	jobTemp.Name = as.GetK8sWorkloadName()
+	jobTemp.Namespace = as.GetNamespace()
+	jobTemp.Labels = as.GetCommonLabels(jobTemp.Labels, map[string]string{
+		"name":    service.ServiceAlias,
+		"version": service.DeployVersion,
+	}, injectLabels)
+	if service.JobStrategy != "" {
+		var js *apimodel.JobStrategy
+		if err := json.Unmarshal([]byte(service.JobStrategy), &js); err != nil || js == nil {
+			// Fix: the original dereferenced js after a failed Unmarshal,
+			// panicking on a malformed strategy. Fall back to defaults.
+			logrus.Error("job strategy json unmarshal error", err)
+		} else {
+			if js.ActiveDeadlineSeconds != "" {
+				if ads, err := strconv.ParseInt(js.ActiveDeadlineSeconds, 10, 64); err == nil {
+					jobTemp.Spec.ActiveDeadlineSeconds = &ads
+				}
+			}
+			if js.BackoffLimit != "" {
+				if res, err := strconv.ParseInt(js.BackoffLimit, 10, 32); err == nil {
+					bkl := int32(res)
+					jobTemp.Spec.BackoffLimit = &bkl
+				}
+			}
+			if js.Parallelism != "" {
+				if res, err := strconv.ParseInt(js.Parallelism, 10, 32); err == nil {
+					pll := int32(res)
+					jobTemp.Spec.Parallelism = &pll
+				}
+			}
+			if js.Completions != "" {
+				if res, err := strconv.ParseInt(js.Completions, 10, 32); err == nil {
+					cpt := int32(res)
+					jobTemp.Spec.Completions = &cpt
+				}
+			}
+			cronJob.Spec.Schedule = js.Schedule
+		}
+	}
+	cronJob.Spec.JobTemplate = jobTemp
+	cronJob.Namespace = as.GetNamespace()
+	cronJob.Name = as.GetK8sWorkloadName()
+	as.SetCronJob(cronJob)
+}
+
func getInjectLabels(as *v1.AppService) map[string]string {
mode, err := adaptor.NewAppGoveranceModeHandler(as.GovernanceMode, nil)
if err != nil {
diff --git a/worker/appm/conversion/version.go b/worker/appm/conversion/version.go
index 80f8faa4a..42d9dfd6d 100644
--- a/worker/appm/conversion/version.go
+++ b/worker/appm/conversion/version.go
@@ -19,14 +19,16 @@
package conversion
import (
+ "encoding/json"
"fmt"
- "github.com/goodrain/rainbond/builder/sources"
"net"
"os"
"sort"
"strconv"
"strings"
+ "github.com/goodrain/rainbond/builder/sources"
+
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/model"
@@ -41,6 +43,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
+ "sigs.k8s.io/yaml"
)
//TenantServiceVersion service deploy version conv. define pod spec
@@ -70,20 +73,18 @@ func TenantServiceVersion(as *v1.AppService, dbmanager db.Manager) error {
dv.SetVolume(dbmodel.ShareFileVolumeType, "kube-config", "/etc/kubernetes", "/grdata/kubernetes", corev1.HostPathDirectoryOrCreate, true)
}
nodeSelector := createNodeSelector(as, dbmanager)
- tolerations := createToleration(nodeSelector)
- injectLabels := getInjectLabels(as)
+ labels := createLabels(as, dbmanager)
+ tolerations := createToleration(nodeSelector, as, dbmanager)
+ volumes := getVolumes(dv, as, dbmanager)
podtmpSpec := corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: as.GetCommonLabels(map[string]string{
- "name": as.ServiceAlias,
- "version": as.DeployVersion,
- }, injectLabels),
+ Labels: labels,
Annotations: createPodAnnotations(as),
Name: as.GetK8sWorkloadName() + "-pod-spec",
},
Spec: corev1.PodSpec{
ImagePullSecrets: setImagePullSecrets(),
- Volumes: dv.GetVolumes(),
+ Volumes: volumes,
Containers: []corev1.Container{*container},
NodeSelector: nodeSelector,
Tolerations: tolerations,
@@ -113,13 +114,20 @@ func TenantServiceVersion(as *v1.AppService, dbmanager db.Manager) error {
}
return ""
}(),
+ ServiceAccountName: createServiceAccountName(as, dbmanager),
},
}
var terminationGracePeriodSeconds int64 = 10
if as.GetDeployment() != nil {
podtmpSpec.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
}
- //set to deployment or statefulset
+ if as.GetJob() != nil {
+ podtmpSpec.Spec.RestartPolicy = "Never"
+ }
+ if as.GetCronJob() != nil {
+ podtmpSpec.Spec.RestartPolicy = "OnFailure"
+ }
+ //set to deployment or statefulset job or cronjob
as.SetPodTemplate(podtmpSpec)
return nil
}
@@ -158,7 +166,7 @@ func getMainContainer(as *v1.AppService, version *dbmodel.VersionInfo, dv *volum
Ports: ports,
Env: envs,
EnvFrom: envFromSecrets,
- VolumeMounts: dv.GetVolumeMounts(),
+ VolumeMounts: createVolumeMounts(dv, as, dbmanager),
LivenessProbe: createProbe(as, dbmanager, "liveness"),
ReadinessProbe: createProbe(as, dbmanager, "readiness"),
Resources: resources,
@@ -172,6 +180,17 @@ func getMainContainer(as *v1.AppService, version *dbmodel.VersionInfo, dv *volum
logrus.Infof("service id: %s; enable privileged.", as.ServiceID)
c.SecurityContext = &corev1.SecurityContext{Privileged: util.Bool(true)}
}
+ privilegedAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNamePrivileged)
+ if err != nil && err != gorm.ErrRecordNotFound {
+ return nil, fmt.Errorf("get by privileged attribute error: %v", err)
+ }
+ if privilegedAttribute != nil {
+ pril, err := strconv.ParseBool(privilegedAttribute.AttributeValue)
+ if err != nil {
+ return nil, err
+ }
+ c.SecurityContext = &corev1.SecurityContext{Privileged: util.Bool(pril)}
+ }
return c, nil
}
@@ -364,6 +383,23 @@ func createEnv(as *v1.AppService, dbmanager db.Manager, envVarSecrets []*corev1.
as.ExtensionSet[strings.ToLower(k[3:])] = v
}
}
+ var customEnvs []corev1.EnvVar
+ envAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameENV)
+ if err != nil {
+ logrus.Warn("get by env attribute error", err)
+ return envs, nil
+ }
+ envAttributeJSON, err := yaml.YAMLToJSON([]byte(envAttribute.AttributeValue))
+ if err != nil {
+ logrus.Warn("envAttribute yaml to json error", err)
+ return envs, nil
+ }
+ err = json.Unmarshal(envAttributeJSON, &customEnvs)
+ if err != nil {
+ logrus.Warn("envAttribute json unmarshal error", err)
+ return envs, nil
+ }
+ envs = append(envs, customEnvs...)
return envs, nil
}
@@ -494,6 +530,28 @@ func createVolumes(as *v1.AppService, version *dbmodel.VersionInfo, envs []corev
return define, nil
}
+// getVolumes returns the volumes from the volume.Define plus any extra
+// volumes the user stored in the "volumes" K8s attribute (YAML or JSON,
+// decoded as []corev1.Volume). On any attribute/parse error the base
+// volumes are returned unchanged.
+func getVolumes(dv *volume.Define, as *v1.AppService, dbmanager db.Manager) []corev1.Volume {
+ volumes := dv.GetVolumes()
+ volumeAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameVolumes)
+ if err != nil {
+  // NOTE(review): gorm.ErrRecordNotFound (no attribute set — the common
+  // case) also lands here and is logged as a warning; confirm that noise
+  // is acceptable.
+  logrus.Warn("get by volumes attribute error", err)
+  return volumes
+ }
+ var vs []corev1.Volume
+ VolumeAttributeJSON, err := yaml.YAMLToJSON([]byte(volumeAttribute.AttributeValue))
+ if err != nil {
+  logrus.Warn("volumeAttribute yaml to json error", err)
+  return volumes
+ }
+ err = json.Unmarshal(VolumeAttributeJSON, &vs)
+ if err != nil {
+  logrus.Warn("volumeAttribute json unmarshal error", err)
+  return volumes
+ }
+ volumes = append(volumes, vs...)
+ return volumes
+}
+
func createResources(as *v1.AppService) corev1.ResourceRequirements {
var cpuRequest, cpuLimit int64
if limit, ok := as.ExtensionSet["cpulimit"]; ok {
@@ -661,8 +719,36 @@ func createNodeSelector(as *v1.AppService, dbmanager db.Manager) map[string]stri
}
}
}
+ selectorAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameNodeSelector)
+ if err != nil {
+ logrus.Warn("get by NodeSelector attribute error", err)
+ return selector
+ }
+ err = json.Unmarshal([]byte(selectorAttribute.AttributeValue), &selector)
+ if err != nil {
+ logrus.Warn("selector json unmarshal error", err)
+ return selector
+ }
return selector
}
+
+// createLabels builds the pod template labels: user-defined labels from the
+// "labels" K8s attribute (JSON map, when present), overridden by the
+// built-in name/version labels, merged with common and mesh-inject labels.
+func createLabels(as *v1.AppService, dbmanager db.Manager) map[string]string {
+	labels := make(map[string]string)
+	labelsAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameLabels)
+	if err == nil {
+		if err := json.Unmarshal([]byte(labelsAttribute.AttributeValue), &labels); err == nil {
+			// Fix: %v, not %s — labels is a map, not a string.
+			logrus.Infof("labelsAttribute:%v", labels)
+		}
+	}
+	// Built-in labels take precedence over user-defined ones.
+	labels["name"] = as.ServiceAlias
+	labels["version"] = as.DeployVersion
+	injectLabels := getInjectLabels(as)
+	return as.GetCommonLabels(labels, injectLabels)
+}
+
+
func createAffinity(as *v1.AppService, dbmanager db.Manager) *corev1.Affinity {
var affinity corev1.Affinity
nsr := make([]corev1.NodeSelectorRequirement, 0)
@@ -769,6 +855,21 @@ func createAffinity(as *v1.AppService, dbmanager db.Manager) *corev1.Affinity {
RequiredDuringSchedulingIgnoredDuringExecution: podAntAffinity,
}
}
+ affinityAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameAffinity)
+ if err != nil {
+ logrus.Warn("get by affinity attribute error", err)
+ return &affinity
+ }
+ AffinityAttributeJSON, err := yaml.YAMLToJSON([]byte(affinityAttribute.AttributeValue))
+ if err != nil {
+ logrus.Warn("Affinity attribute yaml to json error", err)
+ return &affinity
+ }
+ err = json.Unmarshal(AffinityAttributeJSON, &affinity)
+ if err != nil {
+ logrus.Warn("affinity json unmarshal error", err)
+ return &affinity
+ }
return &affinity
}
@@ -795,7 +896,7 @@ func setImagePullSecrets() []corev1.LocalObjectReference {
}
}
-func createToleration(nodeSelector map[string]string) []corev1.Toleration {
+func createToleration(nodeSelector map[string]string, as *v1.AppService, dbmanager db.Manager) []corev1.Toleration {
var tolerations []corev1.Toleration
if value, exist := nodeSelector["type"]; exist && value == "virtual-kubelet" {
tolerations = append(tolerations, corev1.Toleration{
@@ -803,6 +904,24 @@ func createToleration(nodeSelector map[string]string) []corev1.Toleration {
Operator: corev1.TolerationOpExists,
})
}
+ tolerationAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameTolerations)
+ if err != nil {
+ logrus.Warn("get by toleration attribute error", err)
+ return tolerations
+ }
+ var tolers []corev1.Toleration
+ tolerationAttributeJSON, err := yaml.YAMLToJSON([]byte(tolerationAttribute.AttributeValue))
+ if err != nil {
+ logrus.Warn("toleration attribute yaml to json error", err)
+ return tolerations
+ }
+ err = json.Unmarshal(tolerationAttributeJSON, &tolers)
+ if err != nil {
+ logrus.Warn("toleration json unmarshal error", err)
+ return tolerations
+ }
+ tolerations = append(tolerations, tolers...)
+
return tolerations
}
@@ -825,3 +944,37 @@ func createHostAliases(as *v1.AppService) []corev1.HostAlias {
}
return re
}
+
+// createServiceAccountName returns the service account name stored in the
+// "serviceAccountName" K8s attribute, or "" when the attribute is absent
+// or the lookup fails (the pod then uses the namespace default account).
+func createServiceAccountName(as *v1.AppService, dbmanager db.Manager) string {
+ var serviceAN string
+ sa, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameServiceAccountName)
+ if err != nil {
+  // NOTE(review): gorm.ErrRecordNotFound (no attribute set) is also logged
+  // as a warning here — confirm that noise is acceptable.
+  logrus.Warn("get by ServiceAccountName attribute error", err)
+  return ""
+ }
+ serviceAN = sa.AttributeValue
+ return serviceAN
+}
+
+// createVolumeMounts returns the volume mounts from the volume.Define plus
+// any extra mounts the user stored in the "volumeMounts" K8s attribute
+// (YAML or JSON, decoded as []corev1.VolumeMount). On any attribute/parse
+// error the base mounts are returned unchanged.
+func createVolumeMounts(dv *volume.Define, as *v1.AppService, dbmanager db.Manager) []corev1.VolumeMount {
+ volumeMounts := dv.GetVolumeMounts()
+ volumeMountsAttribute, err := dbmanager.ComponentK8sAttributeDao().GetByComponentIDAndName(as.ServiceID, model.K8sAttributeNameVolumeMounts)
+ if err != nil {
+  // NOTE(review): gorm.ErrRecordNotFound (no attribute set — the common
+  // case) also lands here and is logged as a warning; confirm intended.
+  logrus.Warn("get by volumeMounts attribute error", err)
+  return volumeMounts
+ }
+
+ var vms []corev1.VolumeMount
+ VolumeMountsAttributeJSON, err := yaml.YAMLToJSON([]byte(volumeMountsAttribute.AttributeValue))
+ if err != nil {
+  logrus.Warn("volumeMounts attribute yaml to json error", err)
+  return volumeMounts
+ }
+ err = json.Unmarshal(VolumeMountsAttributeJSON, &vms)
+ if err != nil {
+  logrus.Warn("volumeMounts json unmarshal error", err)
+  return volumeMounts
+ }
+ volumeMounts = append(volumeMounts, vms...)
+ return volumeMounts
+}
diff --git a/worker/appm/store/informer.go b/worker/appm/store/informer.go
index 140931969..d029680cb 100644
--- a/worker/appm/store/informer.go
+++ b/worker/appm/store/informer.go
@@ -43,6 +43,8 @@ type Informer struct {
HelmApp cache.SharedIndexInformer
ComponentDefinition cache.SharedIndexInformer
ThirdComponent cache.SharedIndexInformer
+ Job cache.SharedIndexInformer
+ CronJob cache.SharedIndexInformer
CRS map[string]cache.SharedIndexInformer
}
@@ -74,12 +76,14 @@ func (i *Informer) Start(stop chan struct{}) {
go i.HelmApp.Run(stop)
go i.ComponentDefinition.Run(stop)
go i.ThirdComponent.Run(stop)
+ go i.Job.Run(stop)
+ go i.CronJob.Run(stop)
}
//Ready if all kube informers is syncd, store is ready
func (i *Informer) Ready() bool {
if i.Namespace.HasSynced() && i.Ingress.HasSynced() && i.Service.HasSynced() && i.Secret.HasSynced() &&
- i.StatefulSet.HasSynced() && i.Deployment.HasSynced() && i.Pod.HasSynced() &&
+ i.StatefulSet.HasSynced() && i.Deployment.HasSynced() && i.Pod.HasSynced() && i.Pod.HasSynced() && i.CronJob.HasSynced() &&
i.ConfigMap.HasSynced() && i.Nodes.HasSynced() && i.Events.HasSynced() &&
i.HorizontalPodAutoscaler.HasSynced() && i.StorageClass.HasSynced() && i.Claims.HasSynced() && i.CRD.HasSynced() {
return true
diff --git a/worker/appm/store/lister.go b/worker/appm/store/lister.go
index 6a383bac3..7899860e9 100644
--- a/worker/appm/store/lister.go
+++ b/worker/appm/store/lister.go
@@ -23,9 +23,11 @@ import (
crdlisters "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1"
appsv1 "k8s.io/client-go/listers/apps/v1"
autoscalingv2 "k8s.io/client-go/listers/autoscaling/v2beta2"
+ v1 "k8s.io/client-go/listers/batch/v1"
+ v1beta1 "k8s.io/client-go/listers/batch/v1beta1"
corev1 "k8s.io/client-go/listers/core/v1"
- betav1 "k8s.io/client-go/listers/networking/v1beta1"
networkingv1 "k8s.io/client-go/listers/networking/v1"
+ betav1 "k8s.io/client-go/listers/networking/v1beta1"
storagev1 "k8s.io/client-go/listers/storage/v1"
)
@@ -49,4 +51,6 @@ type Lister struct {
HelmApp v1alpha1.HelmAppLister
ComponentDefinition v1alpha1.ComponentDefinitionLister
ThirdComponent v1alpha1.ThirdComponentLister
+ Job v1.JobLister
+ CronJob v1beta1.CronJobLister
}
diff --git a/worker/appm/store/store.go b/worker/appm/store/store.go
index b8b482bb8..a981635d5 100644
--- a/worker/appm/store/store.go
+++ b/worker/appm/store/store.go
@@ -21,6 +21,8 @@ package store
import (
"context"
"fmt"
+ batchv1 "k8s.io/api/batch/v1"
+ "k8s.io/api/batch/v1beta1"
betav1 "k8s.io/api/networking/v1beta1"
"os"
"sync"
@@ -206,7 +208,6 @@ func NewStore(
store.informers.ConfigMap = infFactory.Core().V1().ConfigMaps().Informer()
store.listers.ConfigMap = infFactory.Core().V1().ConfigMaps().Lister()
-
if k8sutil.IsHighVersion() {
store.informers.Ingress = infFactory.Networking().V1().Ingresses().Informer()
store.listers.Ingress = infFactory.Networking().V1().Ingresses().Lister()
@@ -245,7 +246,10 @@ func NewStore(
store.listers.ComponentDefinition = rainbondInformer.Rainbond().V1alpha1().ComponentDefinitions().Lister()
store.informers.ComponentDefinition = rainbondInformer.Rainbond().V1alpha1().ComponentDefinitions().Informer()
store.informers.ComponentDefinition.AddEventHandlerWithResyncPeriod(componentdefinition.GetComponentDefinitionBuilder(), time.Second*300)
-
+ store.informers.Job = infFactory.Batch().V1().Jobs().Informer()
+ store.listers.Job = infFactory.Batch().V1().Jobs().Lister()
+ store.informers.CronJob = infFactory.Batch().V1beta1().CronJobs().Informer()
+ store.listers.CronJob = infFactory.Batch().V1beta1().CronJobs().Lister()
// Endpoint Event Handler
epEventHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
@@ -301,6 +305,8 @@ func NewStore(
store.informers.Namespace.AddEventHandler(store.nsEventHandler())
store.informers.Deployment.AddEventHandlerWithResyncPeriod(store, time.Second*10)
store.informers.StatefulSet.AddEventHandlerWithResyncPeriod(store, time.Second*10)
+ store.informers.Job.AddEventHandlerWithResyncPeriod(store, time.Second*10)
+ store.informers.CronJob.AddEventHandlerWithResyncPeriod(store, time.Second*10)
store.informers.Pod.AddEventHandlerWithResyncPeriod(store.podEventHandler(), time.Second*10)
store.informers.Secret.AddEventHandlerWithResyncPeriod(store, time.Second*10)
store.informers.Service.AddEventHandlerWithResyncPeriod(store, time.Second*10)
@@ -400,6 +406,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
serviceID := deployment.Labels["service_id"]
version := deployment.Labels["version"]
createrID := deployment.Labels["creater_id"]
+ migrator := deployment.Labels["migrator"]
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
@@ -407,14 +414,84 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
}
if appservice != nil {
appservice.SetDeployment(deployment)
+ if migrator == "rainbond" {
+ label := "service_id=" + serviceID
+ pods, _ := a.conf.KubeClient.CoreV1().Pods(deployment.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: label})
+ if pods != nil {
+ for _, pod := range pods.Items {
+ pod := pod
+ appservice.SetPods(&pod)
+ }
+ }
+ }
return
}
+
+ }
+ }
+ if job, ok := obj.(*batchv1.Job); ok {
+ serviceID := job.Labels["service_id"]
+ version := job.Labels["version"]
+ createrID := job.Labels["creater_id"]
+ migrator := job.Labels["migrator"]
+ if serviceID != "" && version != "" && createrID != "" {
+ appservice, err := a.getAppService(serviceID, version, createrID, true)
+ if err == conversion.ErrServiceNotFound {
+ a.conf.KubeClient.BatchV1().Jobs(job.Namespace).Delete(context.Background(), job.Name, metav1.DeleteOptions{})
+ }
+ if appservice != nil {
+ appservice.SetJob(job)
+ if migrator == "rainbond" {
+ label := "controller-uid=" + job.Spec.Selector.MatchLabels["controller-uid"]
+ pods, _ := a.conf.KubeClient.CoreV1().Pods(job.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: label})
+ if pods != nil {
+ for _, pod := range pods.Items {
+ pod := pod
+ appservice.SetPods(&pod)
+ }
+ }
+ }
+ return
+ }
+
+ }
+ }
+ if cjob, ok := obj.(*v1beta1.CronJob); ok {
+ serviceID := cjob.Labels["service_id"]
+ version := cjob.Labels["version"]
+ createrID := cjob.Labels["creater_id"]
+ migrator := cjob.Labels["migrator"]
+ if serviceID != "" && version != "" && createrID != "" {
+ appservice, err := a.getAppService(serviceID, version, createrID, true)
+ if err == conversion.ErrServiceNotFound {
+ a.conf.KubeClient.BatchV1beta1().CronJobs(cjob.Namespace).Delete(context.Background(), cjob.Name, metav1.DeleteOptions{})
+ }
+ if appservice != nil {
+ appservice.SetCronJob(cjob)
+				if migrator == "rainbond" { // migrated cronjob: re-attach pods of the jobs it spawned
+					jobList, _ := a.conf.KubeClient.BatchV1().Jobs(cjob.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "service_id=" + serviceID})
+					if jobList == nil { // list failed: cronjob is already set, skip pod attach (avoids nil deref)
+						return
+					}
+					for _, job := range jobList.Items {
+						pods, _ := a.conf.KubeClient.CoreV1().Pods(cjob.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: "controller-uid=" + job.Spec.Selector.MatchLabels["controller-uid"]})
+						if pods != nil {
+							for i := range pods.Items {
+								appservice.SetPods(&pods.Items[i])
+							}
+						}
+					}
+				}
+ return
+ }
+
}
}
if statefulset, ok := obj.(*appsv1.StatefulSet); ok {
serviceID := statefulset.Labels["service_id"]
version := statefulset.Labels["version"]
createrID := statefulset.Labels["creater_id"]
+ migrator := statefulset.Labels["migrator"]
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
@@ -422,6 +499,16 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
}
if appservice != nil {
appservice.SetStatefulSet(statefulset)
+ if migrator == "rainbond" {
+ label := "service_id=" + serviceID
+ pods, _ := a.conf.KubeClient.CoreV1().Pods(statefulset.Namespace).List(context.Background(), metav1.ListOptions{LabelSelector: label})
+ if pods != nil {
+ for _, pod := range pods.Items {
+ pod := pod
+ appservice.SetPods(&pod)
+ }
+ }
+ }
return
}
}
@@ -517,6 +604,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
}
}
}
+
if hpa, ok := obj.(*autoscalingv2.HorizontalPodAutoscaler); ok {
serviceID := hpa.Labels["service_id"]
version := hpa.Labels["version"]
@@ -862,6 +950,24 @@ func (a *appRuntimeStore) UpdateGetAppService(serviceID string) *v1.AppService {
appService.SetDeployment(deploy)
}
}
+ if job := appService.GetJob(); job != nil {
+ j, err := a.listers.Job.Jobs(job.Namespace).Get(job.Name)
+ if err != nil && k8sErrors.IsNotFound(err) {
+ appService.DeleteJob(job)
+ }
+ if j != nil {
+ appService.SetJob(j)
+ }
+ }
+ if cronjob := appService.GetCronJob(); cronjob != nil {
+ crjob, err := a.listers.CronJob.CronJobs(cronjob.Namespace).Get(cronjob.Name)
+ if err != nil && k8sErrors.IsNotFound(err) {
+ appService.DeleteCronJob(cronjob)
+ }
+ if crjob != nil {
+ appService.SetCronJob(crjob)
+ }
+ }
if services := appService.GetServices(true); services != nil {
for _, service := range services {
se, err := a.listers.Service.Services(service.Namespace).Get(service.Name)
@@ -1387,6 +1493,22 @@ func (a *appRuntimeStore) scalingRecordServiceAndRuleID(evt *corev1.Event) (stri
}
serviceID = deploy.GetLabels()["service_id"]
ruleID = deploy.GetLabels()["rule_id"]
+ case "Job":
+ job, err := a.listers.Job.Jobs(evt.InvolvedObject.Namespace).Get(evt.InvolvedObject.Name)
+ if err != nil {
+ logrus.Warningf("retrieve job: %v", err)
+ return "", ""
+ }
+ serviceID = job.GetLabels()["service_id"]
+ ruleID = job.GetLabels()["rule_id"]
+ case "CronJob":
+ cjob, err := a.listers.CronJob.CronJobs(evt.InvolvedObject.Namespace).Get(evt.InvolvedObject.Name)
+ if err != nil {
+ logrus.Warningf("retrieve cronjob: %v", err)
+ return "", ""
+ }
+ serviceID = cjob.GetLabels()["service_id"]
+ ruleID = cjob.GetLabels()["rule_id"]
case "Statefulset":
statefulset, err := a.listers.StatefulSet.StatefulSets(evt.InvolvedObject.Namespace).Get(evt.InvolvedObject.Name)
if err != nil {
diff --git a/worker/appm/types/v1/patch.go b/worker/appm/types/v1/patch.go
index f6b038db8..6929ff57d 100644
--- a/worker/appm/types/v1/patch.go
+++ b/worker/appm/types/v1/patch.go
@@ -210,6 +210,7 @@ func getStatefulsetAllowFields(s *v1.StatefulSet) *v1.StatefulSet {
},
UpdateStrategy: s.Spec.UpdateStrategy,
},
+ ObjectMeta: s.Spec.Template.ObjectMeta,
}
}
@@ -241,6 +242,7 @@ func getDeploymentAllowFields(d *v1.Deployment) *v1.Deployment {
HostNetwork: d.Spec.Template.Spec.HostNetwork,
SchedulerName: d.Spec.Template.Spec.SchedulerName,
},
+ ObjectMeta: d.Spec.Template.ObjectMeta,
},
},
}
diff --git a/worker/appm/types/v1/pod_status.go b/worker/appm/types/v1/pod_status.go
index ca0a777d4..ff3f6d86e 100644
--- a/worker/appm/types/v1/pod_status.go
+++ b/worker/appm/types/v1/pod_status.go
@@ -24,7 +24,7 @@ import (
//IsPodTerminated Exception evicted pod
func IsPodTerminated(pod *corev1.Pod) bool {
- if phase := pod.Status.Phase; phase != corev1.PodPending && phase != corev1.PodRunning && phase != corev1.PodUnknown {
+ if phase := pod.Status.Phase; phase != corev1.PodPending && phase != corev1.PodRunning && phase != corev1.PodUnknown && phase != corev1.PodSucceeded && phase != corev1.PodFailed {
return true
}
return false
diff --git a/worker/appm/types/v1/status.go b/worker/appm/types/v1/status.go
index cf2e5e222..82124b5be 100644
--- a/worker/appm/types/v1/status.go
+++ b/worker/appm/types/v1/status.go
@@ -93,6 +93,8 @@ var (
BUILDEFAILURE = "build_failure"
//UNDEPLOY init status
UNDEPLOY = "undeploy"
+	//SUCCEEDED status when a job or cronjob has completed successfully
+	SUCCEEDED = "succeeded"
)
func conversionThirdComponent(obj runtime.Object) *v1alpha1.ThirdComponent {
@@ -158,6 +160,44 @@ func (a *AppService) GetServiceStatus() string {
if a.IsClosed() {
return CLOSED
}
+ if a.job != nil {
+ succeed := 0
+ failed := 0
+ for _, po := range a.pods {
+ if po.Status.Phase == "Succeeded" {
+ succeed++
+ }
+ if po.Status.Phase == "Failed" {
+ failed++
+ }
+ }
+ if len(a.pods) == succeed {
+ return SUCCEEDED
+ }
+ if failed > 0 {
+ return ABNORMAL
+ }
+ return RUNNING
+ }
+ if a.cronjob != nil {
+ succeed := 0
+ failed := 0
+ for _, po := range a.pods {
+ if po.Status.Phase == "Succeeded" {
+ succeed++
+ }
+ if po.Status.Phase == "Failed" {
+ failed++
+ }
+ }
+ if len(a.pods) == succeed {
+ return RUNNING
+ }
+ if failed > 0 {
+ return ABNORMAL
+ }
+ return RUNNING
+ }
if a.statefulset == nil && a.deployment == nil && len(a.pods) > 0 {
return STOPPING
}
diff --git a/worker/appm/types/v1/v1.go b/worker/appm/types/v1/v1.go
index 4bb9df137..2f425a2fc 100644
--- a/worker/appm/types/v1/v1.go
+++ b/worker/appm/types/v1/v1.go
@@ -20,11 +20,13 @@ package v1
import (
"fmt"
- "github.com/goodrain/rainbond/util/k8s"
"os"
"strconv"
"strings"
+ "github.com/goodrain/rainbond/util/k8s"
+ betav1 "k8s.io/api/networking/v1beta1"
+
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/db/model"
dbmodel "github.com/goodrain/rainbond/db/model"
@@ -33,9 +35,10 @@ import (
"github.com/sirupsen/logrus"
v1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
+ batchv1 "k8s.io/api/batch/v1"
+ "k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
- betav1 "k8s.io/api/networking/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
@@ -71,6 +74,12 @@ var TypeStatefulSet AppServiceType = "statefulset"
//TypeDeployment deployment
var TypeDeployment AppServiceType = "deployment"
+//TypeJob job
+var TypeJob AppServiceType = "job"
+
+//TypeCronJob cronjob
+var TypeCronJob AppServiceType = "cronjob"
+
//TypeReplicationController rc
var TypeReplicationController AppServiceType = "replicationcontroller"
@@ -153,6 +162,8 @@ type AppService struct {
tenant *corev1.Namespace
statefulset *v1.StatefulSet
deployment *v1.Deployment
+ job *batchv1.Job
+ cronjob *v1beta1.CronJob
workload runtime.Object
hpas []*autoscalingv2.HorizontalPodAutoscaler
delHPAs []*autoscalingv2.HorizontalPodAutoscaler
@@ -218,6 +229,16 @@ func (a *AppService) DeleteDeployment(d *v1.Deployment) {
a.deployment = nil
}
+//DeleteJob delete kubernetes job model
+func (a *AppService) DeleteJob(d *batchv1.Job) {
+ a.job = nil
+}
+
+//DeleteCronJob delete kubernetes cronjob model
+func (a *AppService) DeleteCronJob(d *v1beta1.CronJob) {
+ a.cronjob = nil
+}
+
//GetStatefulSet get kubernetes statefulset model
func (a AppService) GetStatefulSet() *v1.StatefulSet {
return a.statefulset
@@ -234,6 +255,36 @@ func (a *AppService) SetStatefulSet(d *v1.StatefulSet) {
a.calculateComponentMemoryRequest()
}
+//GetJob get kubernetes job model
+func (a AppService) GetJob() *batchv1.Job {
+ return a.job
+}
+
+//SetJob set kubernetes job model
+func (a *AppService) SetJob(d *batchv1.Job) {
+ a.job = d
+ a.workload = d
+ if v, ok := d.Spec.Template.Labels["version"]; ok && v != "" {
+ a.DeployVersion = v
+ }
+ a.calculateComponentMemoryRequest()
+}
+
+//GetCronJob get kubernetes cronjob model
+func (a AppService) GetCronJob() *v1beta1.CronJob {
+ return a.cronjob
+}
+
+//SetCronJob set kubernetes cronjob model
+func (a *AppService) SetCronJob(d *v1beta1.CronJob) {
+ a.cronjob = d
+ a.workload = d
+ if v, ok := d.Spec.JobTemplate.Labels["version"]; ok && v != "" {
+ a.DeployVersion = v
+ }
+ a.calculateComponentMemoryRequest()
+}
+
//SetReplicaSets set kubernetes replicaset
func (a *AppService) SetReplicaSets(d *v1.ReplicaSet) {
if len(a.replicasets) > 0 {
@@ -522,6 +573,12 @@ func (a *AppService) SetPodTemplate(d corev1.PodTemplateSpec) {
if a.deployment != nil {
a.deployment.Spec.Template = d
}
+ if a.job != nil {
+ a.job.Spec.Template = d
+ }
+ if a.cronjob != nil {
+ a.cronjob.Spec.JobTemplate.Spec.Template = d
+ }
a.calculateComponentMemoryRequest()
}
@@ -533,6 +590,12 @@ func (a *AppService) GetPodTemplate() *corev1.PodTemplateSpec {
if a.deployment != nil {
return &a.deployment.Spec.Template
}
+ if a.job != nil {
+ return &a.job.Spec.Template
+ }
+ if a.cronjob != nil {
+ return &a.cronjob.Spec.JobTemplate.Spec.Template
+ }
return nil
}
diff --git a/worker/gc/gc.go b/worker/gc/gc.go
index f1a03f97d..92fb23be0 100644
--- a/worker/gc/gc.go
+++ b/worker/gc/gc.go
@@ -118,6 +118,12 @@ func (g *GarbageCollector) DelKubernetesObjects(serviceGCReq model.ServiceGCTask
if err := g.clientset.AppsV1().StatefulSets(namespace).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete statefulsets(%s): %v", serviceGCReq.ServiceID, err)
}
+ if err := g.clientset.BatchV1().Jobs(namespace).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
+ logrus.Warningf("[DelKubernetesObjects] delete job(%s): %v", serviceGCReq.ServiceID, err)
+ }
+ if err := g.clientset.BatchV1beta1().CronJobs(namespace).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
+ logrus.Warningf("[DelKubernetesObjects] delete cronjob(%s): %v", serviceGCReq.ServiceID, err)
+ }
if err := g.clientset.ExtensionsV1beta1().Ingresses(namespace).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete extensions ingresses(%s): %v", serviceGCReq.ServiceID, err)
}
@@ -151,6 +157,7 @@ func (g *GarbageCollector) DelKubernetesObjects(serviceGCReq model.ServiceGCTask
}
}
+// listOptionsServiceID returns ListOptions selecting Rainbond-created resources labeled with the given service ID.
func (g *GarbageCollector) listOptionsServiceID(serviceID string) metav1.ListOptions {
labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{
"creator": "Rainbond",
@@ -160,3 +167,13 @@ func (g *GarbageCollector) listOptionsServiceID(serviceID string) metav1.ListOpt
LabelSelector: labels.Set(labelSelector.MatchLabels).String(),
}
}
+
+// DelComponentPkg removes the component's local source-package directory from shared storage.
+func (g *GarbageCollector) DelComponentPkg(serviceGCReq model.ServiceGCTaskBody) {
+	logrus.Infof("service id: %s; delete component package.", serviceGCReq.ServiceID)
+	// package dir under /grdata is no longer needed once the component is deleted
+	pkgPath := fmt.Sprintf("/grdata/package_build/components/%s", serviceGCReq.ServiceID)
+	if err := os.RemoveAll(pkgPath); err != nil {
+		logrus.Warningf("remove component package: %v", err)
+	}
+}
diff --git a/worker/handle/manager.go b/worker/handle/manager.go
index 17696366b..a1c4db466 100644
--- a/worker/handle/manager.go
+++ b/worker/handle/manager.go
@@ -463,6 +463,7 @@ func (m *Manager) ExecServiceGCTask(task *model.Task) error {
m.garbageCollector.DelPvPvcByServiceID(serviceGCReq)
m.garbageCollector.DelVolumeData(serviceGCReq)
m.garbageCollector.DelKubernetesObjects(serviceGCReq)
+ m.garbageCollector.DelComponentPkg(serviceGCReq)
return nil
}
diff --git a/worker/server/server.go b/worker/server/server.go
index db42462bd..695132897 100644
--- a/worker/server/server.go
+++ b/worker/server/server.go
@@ -701,10 +701,10 @@ func (r *RuntimeServer) GetAppVolumeStatus(ctx context.Context, re *pb.ServiceRe
pods := as.GetPods(false)
for _, pod := range pods {
// if pod is terminated, volume status of pod is NOT_READY
- if v1.IsPodTerminated(pod) {
- continue
- }
- // Exception pod information due to node loss is no longer displayed, so volume status is NOT_READY
+ // if v1.IsPodTerminated(pod) {
+ // continue
+ // }
+ // // Exception pod information due to node loss is no longer displayed, so volume status is NOT_READY
if v1.IsPodNodeLost(pod) {
continue
}
diff --git a/worker/util/pod.go b/worker/util/pod.go
index 58f1c275e..6e7c39db8 100644
--- a/worker/util/pod.go
+++ b/worker/util/pod.go
@@ -94,6 +94,19 @@ func DescribePodStatus(clientset kubernetes.Interface, pod *corev1.Pod, podStatu
if cstatus.State.Terminated.Reason == "OOMKilled" {
podStatus.Advice = PodStatusAdviceOOM.String()
}
+			for _, ref := range pod.OwnerReferences { // Job-owned pods: map terminated reason to a final status
+				if ref.Kind != "Job" {
+					continue
+				}
+				switch cstatus.State.Terminated.Reason {
+				case "Completed":
+					podStatus.Type = pb.PodStatus_SUCCEEDED
+				case "DeadlineExceeded":
+					podStatus.Type = pb.PodStatus_FAILED
+				case "Error":
+					podStatus.Type = pb.PodStatus_ABNORMAL
+				}
+			}
return
}
if !cstatus.Ready {