Merge pull request #1081 from goodrain/master

merge branch master to 5.3
This commit is contained in:
barnettZQG 2021-08-18 09:32:33 +08:00 committed by GitHub
commit fd1a8a6fb0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
101 changed files with 4372 additions and 5219 deletions

View File

@ -24,4 +24,4 @@ jobs:
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
DOMESTIC_DOCKER_PASSWORD: ${{ secrets.DOMESTIC_DOCKER_PASSWORD }}
DOMESTIC_DOCKER_USERNAME: ${{ secrets.DOMESTIC_DOCKER_USERNAME }}
run: VERSION=v5.3.2-release ./release.sh all push
run: VERSION=v5.3.3-release ./release.sh all push

View File

@ -53,6 +53,7 @@ type TenantInterface interface {
LimitTenantMemory(w http.ResponseWriter, r *http.Request)
TenantResourcesStatus(w http.ResponseWriter, r *http.Request)
CheckResourceName(w http.ResponseWriter, r *http.Request)
Log(w http.ResponseWriter, r *http.Request)
}
//ServiceInterface ServiceInterface

View File

@ -305,6 +305,8 @@ func (v2 *V2) serviceRouter() chi.Router {
r.Put("/service-monitors/{name}", middleware.WrapEL(controller.GetManager().UpdateServiceMonitors, dbmodel.TargetTypeService, "update-app-service-monitor", dbmodel.SYNEVENTTYPE))
r.Delete("/service-monitors/{name}", middleware.WrapEL(controller.GetManager().DeleteServiceMonitors, dbmodel.TargetTypeService, "delete-app-service-monitor", dbmodel.SYNEVENTTYPE))
r.Get("/log", controller.GetManager().Log)
return r
}

View File

@ -151,6 +151,11 @@ func (a *ApplicationController) ListComponents(w http.ResponseWriter, r *http.Re
func (a *ApplicationController) DeleteApp(w http.ResponseWriter, r *http.Request) {
app := r.Context().Value(ctxutil.ContextKey("application")).(*dbmodel.Application)
var req model.EtcdCleanReq
if httputil.ValidatorRequestStructAndErrorResponse(r, w, &req, nil) {
logrus.Debugf("delete app etcd keys : %+v", req.Keys)
handler.GetEtcdHandler().CleanAllServiceData(req.Keys)
}
// Delete application
err := handler.GetApplicationHandler().DeleteApp(r.Context(), app)
if err != nil {

View File

@ -23,8 +23,7 @@ import (
"io/ioutil"
"net/http"
"os"
validator "github.com/goodrain/rainbond/util/govalidator"
"strconv"
"github.com/go-chi/chi"
"github.com/goodrain/rainbond/api/handler"
@ -33,6 +32,7 @@ import (
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
validator "github.com/goodrain/rainbond/util/govalidator"
httputil "github.com/goodrain/rainbond/util/http"
"github.com/goodrain/rainbond/worker/discover/model"
"github.com/jinzhu/gorm"
@ -68,9 +68,11 @@ func (t *TenantStruct) StartService(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(ctxutil.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(ctxutil.ContextKey("service")).(*dbmodel.TenantServices)
sEvent := r.Context().Value(ctxutil.ContextKey("event")).(*dbmodel.ServiceEvent)
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, sEvent.EventID, err.Error())
return
if service.Kind != "third_party" {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, sEvent.EventID, err.Error())
return
}
}
startStopStruct := &api_model.StartStopStruct{
@ -84,7 +86,6 @@ func (t *TenantStruct) StartService(w http.ResponseWriter, r *http.Request) {
return
}
httputil.ReturnSuccess(r, w, sEvent)
return
}
//StopService StopService
@ -177,7 +178,6 @@ func (t *TenantStruct) RestartService(w http.ResponseWriter, r *http.Request) {
return
}
httputil.ReturnSuccess(r, w, sEvent)
return
}
//VerticalService VerticalService
@ -213,23 +213,23 @@ func (t *TenantStruct) VerticalService(w http.ResponseWriter, r *http.Request) {
tenantID := r.Context().Value(ctxutil.ContextKey("tenant_id")).(string)
serviceID := r.Context().Value(ctxutil.ContextKey("service_id")).(string)
sEvent := r.Context().Value(ctxutil.ContextKey("event")).(*dbmodel.ServiceEvent)
var cpu_set, gpu_set, memory_set *int
var cpuSet, gpuSet, memorySet *int
if cpu, ok := data["container_cpu"].(float64); ok {
cpu_int := int(cpu)
cpu_set = &cpu_int
cpuInt := int(cpu)
cpuSet = &cpuInt
}
if memory, ok := data["container_memory"].(float64); ok {
memory_int := int(memory)
memory_set = &memory_int
memoryInt := int(memory)
memorySet = &memoryInt
}
if gpu, ok := data["container_gpu"].(float64); ok {
gpu_int := int(gpu)
gpu_set = &gpu_int
gpuInt := int(gpu)
gpuSet = &gpuInt
}
tenant := r.Context().Value(ctxutil.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(ctxutil.ContextKey("service")).(*dbmodel.TenantServices)
if memory_set != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*(*memory_set)); err != nil {
if memorySet != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*(*memorySet)); err != nil {
httputil.ReturnResNotEnough(r, w, sEvent.EventID, err.Error())
return
}
@ -238,9 +238,9 @@ func (t *TenantStruct) VerticalService(w http.ResponseWriter, r *http.Request) {
TenantID: tenantID,
ServiceID: serviceID,
EventID: sEvent.EventID,
ContainerCPU: cpu_set,
ContainerMemory: memory_set,
ContainerGPU: gpu_set,
ContainerCPU: cpuSet,
ContainerMemory: memorySet,
ContainerGPU: gpuSet,
}
if err := handler.GetServiceManager().ServiceVertical(r.Context(), verticalTask); err != nil {
httputil.ReturnError(r, w, 500, fmt.Sprintf("service vertical error. %v", err))
@ -475,7 +475,7 @@ func (t *TenantStruct) GetDeployVersion(w http.ResponseWriter, r *http.Request)
return
}
if err == gorm.ErrRecordNotFound {
httputil.ReturnError(r, w, 404, fmt.Sprintf("build version do not exist"))
httputil.ReturnError(r, w, 404, "build version do not exist")
return
}
httputil.ReturnSuccess(r, w, version)
@ -492,7 +492,7 @@ func (t *TenantStruct) GetManyDeployVersion(w http.ResponseWriter, r *http.Reque
}
serviceIDs, ok := data["service_ids"].([]interface{})
if !ok {
httputil.ReturnError(r, w, 400, fmt.Sprintf("service ids must be a array"))
httputil.ReturnError(r, w, 400, "service ids must be a array")
return
}
var list []string
@ -501,7 +501,7 @@ func (t *TenantStruct) GetManyDeployVersion(w http.ResponseWriter, r *http.Reque
}
services, err := db.GetManager().TenantServiceDao().GetServiceByIDs(list)
if err != nil {
httputil.ReturnError(r, w, 500, fmt.Sprintf(err.Error()))
httputil.ReturnError(r, w, 500, err.Error())
return
}
var versionList []*dbmodel.VersionInfo
@ -559,9 +559,11 @@ func (t *TenantStruct) UpgradeService(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(ctxutil.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(ctxutil.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, upgradeRequest.EventID, err.Error())
return
if service.Kind != "third_party" {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, upgradeRequest.EventID, err.Error())
return
}
}
res, err := handler.GetOperationHandler().Upgrade(&upgradeRequest)
@ -656,7 +658,6 @@ func (t *TenantStruct) RollBack(w http.ResponseWriter, r *http.Request) {
re := handler.GetOperationHandler().RollBack(rollbackRequest)
httputil.ReturnSuccess(r, w, re)
return
}
type limitMemory struct {
@ -773,3 +774,17 @@ func GetServiceDeployInfo(w http.ResponseWriter, r *http.Request) {
}
httputil.ReturnSuccess(r, w, info)
}
// Log streams the logs of one container of the component resolved from the
// request context. The pod and container are selected via the "podName" and
// "containerName" query parameters; "follow" turns on log tailing.
func (t *TenantStruct) Log(w http.ResponseWriter, r *http.Request) {
	component := r.Context().Value(ctxutil.ContextKey("service")).(*dbmodel.TenantServices)
	query := r.URL.Query()
	pod := query.Get("podName")
	container := query.Get("containerName")
	// A missing or malformed "follow" value silently falls back to false.
	follow, _ := strconv.ParseBool(query.Get("follow"))
	if err := handler.GetServiceManager().Log(w, r, component, pod, container, follow); err != nil {
		httputil.ReturnBcodeError(r, w, err)
	}
}

View File

@ -19,7 +19,6 @@
package controller
import (
"encoding/json"
"net/http"
"github.com/goodrain/rainbond/api/handler"
@ -137,10 +136,8 @@ func (t *ThirdPartyServiceController) listEndpoints(w http.ResponseWriter, r *ht
httputil.ReturnError(r, w, 500, err.Error())
return
}
b, _ := json.Marshal(res)
logrus.Debugf("response endpoints: %s", string(b))
if res == nil || len(res) == 0 {
httputil.ReturnSuccess(r, w, []*model.EndpointResp{})
if len(res) == 0 {
httputil.ReturnSuccess(r, w, []*model.ThirdEndpoint{})
return
}
httputil.ReturnSuccess(r, w, res)

View File

@ -25,6 +25,7 @@ import (
"github.com/go-chi/chi"
"github.com/goodrain/rainbond/api/handler"
api_model "github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/api/util/bcode"
ctxutil "github.com/goodrain/rainbond/api/util/ctx"
dbmodel "github.com/goodrain/rainbond/db/model"
httputil "github.com/goodrain/rainbond/util/http"
@ -186,6 +187,11 @@ func (t *TenantStruct) UpdVolume(w http.ResponseWriter, r *http.Request) {
return
}
if req.Mode != nil && (*req.Mode > 777 || *req.Mode < 0) {
httputil.ReturnBcodeError(r, w, bcode.NewBadRequest("mode be a number between 0 and 777 (octal)"))
return
}
sid := r.Context().Value(ctxutil.ContextKey("service_id")).(string)
if err := handler.GetServiceManager().UpdVolume(sid, &req); err != nil {
httputil.ReturnError(r, w, 500, err.Error())
@ -354,6 +360,11 @@ func AddVolume(w http.ResponseWriter, r *http.Request) {
return
}
if avs.Body.Mode != nil && (*avs.Body.Mode > 777 || *avs.Body.Mode < 0) {
httputil.ReturnBcodeError(r, w, bcode.NewBadRequest("mode be a number between 0 and 777 (octal)"))
return
}
tsv := &dbmodel.TenantServiceVolume{
ServiceID: serviceID,
VolumeName: avs.Body.VolumeName,
@ -368,6 +379,7 @@ func AddVolume(w http.ResponseWriter, r *http.Request) {
BackupPolicy: avs.Body.BackupPolicy,
ReclaimPolicy: avs.Body.ReclaimPolicy,
AllowExpansion: avs.Body.AllowExpansion,
Mode: avs.Body.Mode,
}
// TODO fanyangyang validate VolumeCapacity AccessMode SharePolicy BackupPolicy ReclaimPolicy AllowExpansion

View File

@ -572,6 +572,9 @@ func (a *ApplicationAction) SyncComponents(app *dbmodel.Application, components
if err := GetGatewayHandler().SyncHTTPRules(tx, components); err != nil {
return err
}
if err := GetGatewayHandler().SyncRuleConfigs(tx, components); err != nil {
return err
}
if err := GetGatewayHandler().SyncTCPRules(tx, components); err != nil {
return err
}

View File

@ -885,8 +885,9 @@ func (g *GatewayAction) listHTTPRuleIDs(componentID string, port int) ([]string,
// SyncHTTPRules -
func (g *GatewayAction) SyncHTTPRules(tx *gorm.DB, components []*apimodel.Component) error {
var (
componentIDs []string
httpRules []*model.HTTPRule
componentIDs []string
httpRules []*model.HTTPRule
ruleExtensions []*model.RuleExtension
)
for _, component := range components {
if component.HTTPRules == nil {
@ -895,14 +896,40 @@ func (g *GatewayAction) SyncHTTPRules(tx *gorm.DB, components []*apimodel.Compon
componentIDs = append(componentIDs, component.ComponentBase.ComponentID)
for _, httpRule := range component.HTTPRules {
httpRules = append(httpRules, httpRule.DbModel(component.ComponentBase.ComponentID))
for _, ext := range httpRule.RuleExtensions {
ruleExtensions = append(ruleExtensions, &model.RuleExtension{
UUID: util.NewUUID(),
RuleID: httpRule.HTTPRuleID,
Key: ext.Key,
Value: ext.Value,
})
}
}
}
if err := g.syncRuleExtensions(tx, httpRules, ruleExtensions); err != nil {
return err
}
if err := db.GetManager().HTTPRuleDaoTransactions(tx).DeleteByComponentIDs(componentIDs); err != nil {
return err
}
return db.GetManager().HTTPRuleDaoTransactions(tx).CreateOrUpdateHTTPRuleInBatch(httpRules)
}
// syncRuleExtensions replaces the rule extensions belonging to the given http
// rules: every extension currently stored for those rule IDs is deleted, then
// the desired set is written back in a single batch, all inside tx.
func (g *GatewayAction) syncRuleExtensions(tx *gorm.DB, httpRules []*model.HTTPRule, exts []*model.RuleExtension) error {
	var ruleIDs []string
	for _, rule := range httpRules {
		ruleIDs = append(ruleIDs, rule.UUID)
	}
	dao := db.GetManager().RuleExtensionDaoTransactions(tx)
	if err := dao.DeleteByRuleIDs(ruleIDs); err != nil {
		return err
	}
	return dao.CreateOrUpdateRuleExtensionsInBatch(exts)
}
// SyncTCPRules -
func (g *GatewayAction) SyncTCPRules(tx *gorm.DB, components []*apimodel.Component) error {
var (
@ -923,3 +950,34 @@ func (g *GatewayAction) SyncTCPRules(tx *gorm.DB, components []*apimodel.Compone
}
return db.GetManager().TCPRuleDaoTransactions(tx).CreateOrUpdateTCPRuleInBatch(tcpRules)
}
// SyncRuleConfigs rebuilds the gateway rule configs for the given components:
// all configs attached to the components' http rules are deleted by rule ID,
// then the desired configs are recreated in one batch inside tx.
//
// NOTE(review): the http rules are listed through the non-transactional DAO
// (HTTPRuleDao, not HTTPRuleDaoTransactions), so rules created earlier in the
// same uncommitted transaction may not be visible here — verify intended.
func (g *GatewayAction) SyncRuleConfigs(tx *gorm.DB, components []*apimodel.Component) error {
	var (
		configs      []*model.GwRuleConfig
		componentIDs []string
	)
	for _, component := range components {
		componentIDs = append(componentIDs, component.ComponentBase.ComponentID)
		// Ranging over an empty HTTPRuleConfigs slice is a no-op.
		for _, httpRuleConfig := range component.HTTPRuleConfigs {
			configs = append(configs, httpRuleConfig.DbModel()...)
		}
	}

	// Collect the IDs of every http rule owned by the components.
	rules, err := db.GetManager().HTTPRuleDao().ListByComponentIDs(componentIDs)
	if err != nil {
		return err
	}
	var ruleIDs []string
	for _, rule := range rules {
		ruleIDs = append(ruleIDs, rule.UUID)
	}

	if err := db.GetManager().GwRuleConfigDaoTransactions(tx).DeleteByRuleIDs(ruleIDs); err != nil {
		return err
	}
	return db.GetManager().GwRuleConfigDaoTransactions(tx).CreateOrUpdateGwRuleConfigsInBatch(configs)
}

View File

@ -61,4 +61,5 @@ type GatewayHandler interface {
DeleteIngressRulesByComponentPort(tx *gorm.DB, componentID string, port int) error
SyncHTTPRules(tx *gorm.DB, components []*apimodel.Component) error
SyncTCPRules(tx *gorm.DB, components []*apimodel.Component) error
SyncRuleConfigs(tx *gorm.DB, components []*apimodel.Component) error
}

View File

@ -60,7 +60,7 @@ func InitHandle(conf option.Config,
return err
}
dbmanager := db.GetManager()
defaultServieHandler = CreateManager(conf, mqClient, etcdcli, statusCli, prometheusCli, rainbondClient)
defaultServieHandler = CreateManager(conf, mqClient, etcdcli, statusCli, prometheusCli, rainbondClient, kubeClient)
defaultPluginHandler = CreatePluginManager(mqClient)
defaultAppHandler = CreateAppManager(mqClient)
defaultTenantHandler = CreateTenManager(mqClient, statusCli, &conf, kubeClient, prometheusCli, k8sClient)

View File

@ -328,10 +328,10 @@ func (p *PluginAction) buildPlugin(b *api_model.BuildPluginStruct, plugin *dbmod
Info: b.Body.Info,
Status: "building",
}
if b.Body.PluginCPU == 0 {
if b.Body.PluginCPU < 0 {
pbv.ContainerCPU = 125
}
if b.Body.PluginMemory == 0 {
if b.Body.PluginMemory < 0 {
pbv.ContainerMemory = 50
}
if err := db.GetManager().TenantPluginBuildVersionDao().AddModel(pbv); err != nil {

View File

@ -22,6 +22,8 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
@ -29,14 +31,20 @@ import (
"github.com/coreos/etcd/clientv3"
"github.com/goodrain/rainbond/api/client/prometheus"
api_model "github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/api/util"
"github.com/goodrain/rainbond/api/util/bcode"
"github.com/goodrain/rainbond/api/util/license"
"github.com/goodrain/rainbond/builder/parser"
"github.com/goodrain/rainbond/cmd/api/option"
"github.com/goodrain/rainbond/db"
dberr "github.com/goodrain/rainbond/db/errors"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
gclient "github.com/goodrain/rainbond/mq/client"
"github.com/goodrain/rainbond/pkg/generated/clientset/versioned"
core_util "github.com/goodrain/rainbond/util"
typesv1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/goodrain/rainbond/worker/client"
"github.com/goodrain/rainbond/worker/discover/model"
"github.com/goodrain/rainbond/worker/server"
@ -46,17 +54,11 @@ import (
"github.com/pquerna/ffjson/ffjson"
"github.com/sirupsen/logrus"
"github.com/twinj/uuid"
corev1 "k8s.io/api/core/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api_model "github.com/goodrain/rainbond/api/model"
dberr "github.com/goodrain/rainbond/db/errors"
core_model "github.com/goodrain/rainbond/db/model"
dbmodel "github.com/goodrain/rainbond/db/model"
eventutil "github.com/goodrain/rainbond/eventlog/util"
gclient "github.com/goodrain/rainbond/mq/client"
core_util "github.com/goodrain/rainbond/util"
typesv1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"k8s.io/apiserver/pkg/util/flushwriter"
"k8s.io/client-go/kubernetes"
)
// ErrServiceNotClosed -
@ -70,6 +72,7 @@ type ServiceAction struct {
prometheusCli prometheus.Interface
conf option.Config
rainbondClient versioned.Interface
kubeClient kubernetes.Interface
}
type dCfg struct {
@ -86,7 +89,8 @@ func CreateManager(conf option.Config,
etcdCli *clientv3.Client,
statusCli *client.AppRuntimeSyncClient,
prometheusCli prometheus.Interface,
rainbondClient versioned.Interface) *ServiceAction {
rainbondClient versioned.Interface,
kubeClient kubernetes.Interface) *ServiceAction {
return &ServiceAction{
MQClient: mqClient,
EtcdCli: etcdCli,
@ -94,6 +98,7 @@ func CreateManager(conf option.Config,
conf: conf,
prometheusCli: prometheusCli,
rainbondClient: rainbondClient,
kubeClient: kubeClient,
}
}
@ -713,10 +718,10 @@ func (s *ServiceAction) ServiceCreate(sc *api_model.ServiceStruct) error {
if sc.OSType == "windows" {
if err := db.GetManager().TenantServiceLabelDaoTransactions(tx).AddModel(&dbmodel.TenantServiceLable{
ServiceID: ts.ServiceID,
LabelKey: core_model.LabelKeyNodeSelector,
LabelKey: dbmodel.LabelKeyNodeSelector,
LabelValue: sc.OSType,
}); err != nil {
logrus.Errorf("add label %s=%s %v error, %v", core_model.LabelKeyNodeSelector, sc.OSType, ts.ServiceID, err)
logrus.Errorf("add label %s=%s %v error, %v", dbmodel.LabelKeyNodeSelector, sc.OSType, ts.ServiceID, err)
tx.Rollback()
return err
}
@ -743,12 +748,10 @@ func (s *ServiceAction) ServiceCreate(sc *api_model.ServiceStruct) error {
}
}
if sc.Endpoints.Static != nil {
trueValue := true
for _, o := range sc.Endpoints.Static {
ep := &dbmodel.Endpoint{
ServiceID: sc.ServiceID,
UUID: core_util.NewUUID(),
IsOnline: &trueValue,
}
address := o
port := 0
@ -1745,6 +1748,7 @@ func (s *ServiceAction) UpdVolume(sid string, req *api_model.UpdVolumeReq) error
return err
}
v.VolumePath = req.VolumePath
v.Mode = req.Mode
if err := db.GetManager().TenantServiceVolumeDaoTransactions(tx).UpdateModel(v); err != nil {
tx.Rollback()
return err
@ -1939,7 +1943,7 @@ func (s *ServiceAction) GetStatus(serviceID string) (*api_model.StatusList, erro
//GetServicesStatus 获取一组应用状态,若 serviceIDs为空,获取租户所有应用状态
func (s *ServiceAction) GetServicesStatus(tenantID string, serviceIDs []string) []map[string]interface{} {
if serviceIDs == nil || len(serviceIDs) == 0 {
if len(serviceIDs) == 0 {
services, _ := db.GetManager().TenantServiceDao().GetServicesByTenantID(tenantID)
for _, s := range services {
serviceIDs = append(serviceIDs, s.ServiceID)
@ -1950,11 +1954,9 @@ func (s *ServiceAction) GetServicesStatus(tenantID string, serviceIDs []string)
}
statusList := s.statusCli.GetStatuss(strings.Join(serviceIDs, ","))
var info = make([]map[string]interface{}, 0)
if statusList != nil {
for k, v := range statusList {
serviceInfo := map[string]interface{}{"service_id": k, "status": v, "status_cn": TransStatus(v), "used_mem": 0}
info = append(info, serviceInfo)
}
for k, v := range statusList {
serviceInfo := map[string]interface{}{"service_id": k, "status": v, "status_cn": TransStatus(v), "used_mem": 0}
info = append(info, serviceInfo)
}
return info
}
@ -2019,7 +2021,7 @@ func (s *ServiceAction) CreateTenant(t *dbmodel.Tenants) error {
//CreateTenandIDAndName create tenant_id and tenant_name
func (s *ServiceAction) CreateTenandIDAndName(eid string) (string, string, error) {
id := fmt.Sprintf("%s", uuid.NewV4())
id := uuid.NewV4().String()
uid := strings.Replace(id, "-", "", -1)
name := strings.Split(id, "-")[0]
logrus.Debugf("uuid is %v, name is %v", uid, name)
@ -2103,14 +2105,12 @@ func (s *ServiceAction) GetMultiServicePods(serviceIDs []string) (*K8sPodInfos,
}
convpod := func(serviceID string, pods []*pb.ServiceAppPod) []*K8sPodInfo {
var podsInfoList []*K8sPodInfo
var podNames []string
for _, v := range pods {
var podInfo K8sPodInfo
podInfo.PodName = v.PodName
podInfo.PodIP = v.PodIp
podInfo.PodStatus = v.PodStatus
podInfo.ServiceID = serviceID
podNames = append(podNames, v.PodName)
podsInfoList = append(podsInfoList, &podInfo)
}
return podsInfoList
@ -2298,23 +2298,6 @@ func (s *ServiceAction) deleteThirdComponent(ctx context.Context, component *dbm
return nil
}
// delLogFile removes the log files persisted for the given service: the
// docker log written while the service was running, and one event log file
// per entry in eventIDs. Failures are logged as warnings, never returned.
func (s *ServiceAction) delLogFile(serviceID string, eventIDs []string) {
	// Runtime (docker) log directory of the service.
	if err := os.RemoveAll(eventutil.DockerLogFilePath(s.conf.LogPath, serviceID)); err != nil {
		logrus.Warningf("remove docker log files: %v", err)
	}
	// Event logs live in a shared directory, one file per event ID.
	eventLogDir := eventutil.EventLogFilePath(s.conf.LogPath)
	for _, eventID := range eventIDs {
		name := eventutil.EventLogFileName(eventLogDir, eventID)
		if err := os.RemoveAll(name); err != nil {
			logrus.Warningf("file: %s; remove event log file: %v", name, err)
		}
	}
}
func (s *ServiceAction) gcTaskBody(tenantID, serviceID string) (map[string]interface{}, error) {
events, err := db.GetManager().ServiceEventDao().ListByTargetID(serviceID)
if err != nil {
@ -2902,6 +2885,49 @@ func (s *ServiceAction) SyncComponentEndpoints(tx *gorm.DB, components []*api_mo
return db.GetManager().ThirdPartySvcDiscoveryCfgDaoTransactions(tx).CreateOrUpdate3rdSvcDiscoveryCfgInBatch(thirdPartySvcDiscoveryCfgs)
}
// Log streams the logs of one container of the component to the HTTP
// response as a chunked stream; when follow is true the stream stays open
// and tails the container log.
//
// NOTE(review): despite the original intent of also serving pod- and
// component-level logs, only container-level logs are implemented: both
// podName and containerName are required.
func (s *ServiceAction) Log(w http.ResponseWriter, r *http.Request, component *dbmodel.TenantServices, podName, containerName string, follow bool) error {
	// Only support return the logs reader for a container now.
	if podName == "" || containerName == "" {
		return errors.WithStack(bcode.NewBadRequest("the field 'podName' and 'containerName' is required"))
	}

	// The component's tenant ID is used as the kubernetes namespace here.
	request := s.kubeClient.CoreV1().Pods(component.TenantID).GetLogs(podName, &corev1.PodLogOptions{
		Container: containerName,
		Follow:    follow,
	})
	out, err := request.Stream(context.TODO())
	if err != nil {
		if k8sErrors.IsNotFound(err) {
			return errors.Wrap(bcode.ErrPodNotFound, "get pod log")
		}
		return errors.Wrap(err, "get stream from request")
	}
	defer out.Close()

	w.Header().Set("Transfer-Encoding", "chunked")
	w.WriteHeader(http.StatusOK)
	// Flush headers, if possible, so the client sees the stream start immediately.
	if flusher, ok := w.(http.Flusher); ok {
		flusher.Flush()
	}

	// flushwriter flushes after every write, keeping follow-streams live.
	writer := flushwriter.Wrap(w)
	_, err = io.Copy(writer, out)
	if err != nil {
		// The client disconnecting mid-stream is expected, not an error.
		if strings.HasSuffix(err.Error(), "write: broken pipe") {
			return nil
		}
		logrus.Warningf("write stream to response: %v", err)
	}
	return nil
}
//TransStatus trans service status
func TransStatus(eStatus string) string {
switch eStatus {
@ -2930,25 +2956,3 @@ func TransStatus(eStatus string) string {
}
return ""
}
// CheckLabel reports whether the service has any labels (true for v2
// services, false for v1). It returns false when the labels cannot be
// loaded from the database.
func CheckLabel(serviceID string) bool {
	serviceLabel, err := db.GetManager().TenantServiceLabelDao().GetTenantServiceLabel(serviceID)
	if err != nil {
		return false
	}
	// len on a nil slice is 0, so the previous explicit nil check was
	// redundant (same simplification this codebase applies elsewhere).
	return len(serviceLabel) > 0
}
// CheckMapKey ensures rebody contains key: when the key is absent the
// given default value is stored under it. The (possibly mutated) map is
// returned for convenience.
func CheckMapKey(rebody map[string]interface{}, key string, defaultValue interface{}) map[string]interface{} {
	if _, exists := rebody[key]; !exists {
		rebody[key] = defaultValue
	}
	return rebody
}

View File

@ -248,7 +248,7 @@ func (b *BatchOperationHandler) checkEvents(batchOpReqs model.BatchOpRequesters)
var batchOpResult model.BatchOpResult
for _, req := range batchOpReqs {
req := req
if apiutil.CanDoEvent("", dbmodel.SYNEVENTTYPE, "service", req.GetComponentID()) {
if apiutil.CanDoEvent("", dbmodel.SYNEVENTTYPE, "service", req.GetComponentID(), "") {
validReqs = append(validReqs, req)
continue
}

View File

@ -20,7 +20,7 @@ package handler
import (
"context"
"github.com/jinzhu/gorm"
"net/http"
api_model "github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/api/util"
@ -28,6 +28,7 @@ import (
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/worker/discover/model"
"github.com/goodrain/rainbond/worker/server/pb"
"github.com/jinzhu/gorm"
)
//ServiceHandler service handler
@ -90,16 +91,18 @@ type ServiceHandler interface {
AddServiceMonitor(tenantID, serviceID string, add api_model.AddServiceMonitorRequestStruct) (*dbmodel.TenantServiceMonitor, error)
SyncComponentBase(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentMonitors(tx *gorm.DB,app *dbmodel.Application, components []*api_model.Component) error
SyncComponentMonitors(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentPorts(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentRelations(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentEnvs(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentVolumeRels(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentVolumes(tx *gorm.DB, components []*api_model.Component) error
SyncComponentConfigFiles(tx *gorm.DB, components []*api_model.Component) error
SyncComponentProbes(tx *gorm.DB, components []*api_model.Component) error
SyncComponentLabels(tx *gorm.DB, components []*api_model.Component) error
SyncComponentVolumes(tx *gorm.DB, components []*api_model.Component) error
SyncComponentConfigFiles(tx *gorm.DB, components []*api_model.Component) error
SyncComponentProbes(tx *gorm.DB, components []*api_model.Component) error
SyncComponentLabels(tx *gorm.DB, components []*api_model.Component) error
SyncComponentPlugins(tx *gorm.DB, app *dbmodel.Application, components []*api_model.Component) error
SyncComponentScaleRules(tx *gorm.DB, components []*api_model.Component) error
SyncComponentScaleRules(tx *gorm.DB, components []*api_model.Component) error
SyncComponentEndpoints(tx *gorm.DB, components []*api_model.Component) error
Log(w http.ResponseWriter, r *http.Request, component *dbmodel.TenantServices, podName, containerName string, follow bool) error
}

View File

@ -28,6 +28,7 @@ import (
gclient "github.com/goodrain/rainbond/mq/client"
"github.com/goodrain/rainbond/util"
dmodel "github.com/goodrain/rainbond/worker/discover/model"
"github.com/jinzhu/gorm"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -175,14 +176,11 @@ func (o *OperationHandler) upgrade(batchOpReq model.ComponentOpReq) error {
if err != nil {
return err
}
if dbmodel.ServiceKind(component.Kind) == dbmodel.ServiceKindThirdParty {
return err
}
batchOpReq.SetVersion(component.DeployVersion)
version, err := db.GetManager().VersionInfoDao().GetVersionByDeployVersion(batchOpReq.GetVersion(), batchOpReq.GetComponentID())
if err != nil {
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
return err
}
oldDeployVersion := component.DeployVersion
@ -191,13 +189,15 @@ func (o *OperationHandler) upgrade(batchOpReq model.ComponentOpReq) error {
_ = db.GetManager().TenantServiceDao().UpdateModel(component)
}
if version.FinalStatus != "success" {
logrus.Warnf("deploy version %s is not build success,can not change deploy version in this upgrade event", batchOpReq.GetVersion())
} else {
component.DeployVersion = batchOpReq.GetVersion()
err = db.GetManager().TenantServiceDao().UpdateModel(component)
if err != nil {
return err
if version != nil {
if version.FinalStatus != "success" {
logrus.Warnf("deploy version %s is not build success,can not change deploy version in this upgrade event", batchOpReq.GetVersion())
} else {
component.DeployVersion = batchOpReq.GetVersion()
err = db.GetManager().TenantServiceDao().UpdateModel(component)
if err != nil {
return err
}
}
}

View File

@ -154,14 +154,22 @@ func (s *ServiceAction) SetTenantServicePluginRelation(tenantID, serviceID strin
tx.Rollback()
return nil, util.CreateAPIHandleErrorFromDBError("set service plugin env error ", err)
}
tsprCPU := pluginversion.ContainerCPU
tsprMemory := pluginversion.ContainerMemory
if pss.Body.PluginCPU >= 0 {
tsprCPU = pss.Body.PluginCPU
}
if pss.Body.PluginMemory >= 0 {
tsprMemory = pss.Body.PluginMemory
}
relation := &dbmodel.TenantServicePluginRelation{
VersionID: pss.Body.VersionID,
ServiceID: serviceID,
PluginID: pss.Body.PluginID,
Switch: pss.Body.Switch,
PluginModel: plugin.PluginModel,
ContainerCPU: pluginversion.ContainerCPU,
ContainerMemory: pluginversion.ContainerMemory,
ContainerCPU: tsprCPU,
ContainerMemory: tsprMemory,
}
if err := db.GetManager().TenantServicePluginRelationDaoTransactions(tx).AddModel(relation); err != nil {
tx.Rollback()
@ -182,10 +190,10 @@ func (s *ServiceAction) UpdateTenantServicePluginRelation(serviceID string, pss
}
relation.VersionID = pss.Body.VersionID
relation.Switch = pss.Body.Switch
if pss.Body.PluginCPU != 0 {
if pss.Body.PluginCPU >= 0 {
relation.ContainerCPU = pss.Body.PluginCPU
}
if pss.Body.PluginMemory != 0 {
if pss.Body.PluginMemory >= 0 {
relation.ContainerMemory = pss.Body.PluginMemory
}
err = db.GetManager().TenantServicePluginRelationDao().UpdateModel(relation)

View File

@ -359,7 +359,7 @@ func (t *TenantAction) GetTenantsResources(ctx context.Context, tr *api_model.Te
disk = int(mv.Sample.Value() / 1024)
}
if tenantID != "" {
result[tenantID]["disk"] = disk / 1024
result[tenantID]["disk"] = disk
}
}
return result, nil

View File

@ -20,6 +20,7 @@ package handler
import (
"fmt"
"sort"
"strconv"
"strings"
@ -27,13 +28,14 @@ import (
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/util"
"github.com/sirupsen/logrus"
"github.com/goodrain/rainbond/worker/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ThirdPartyServiceHanlder handles business logic for all third-party services
type ThirdPartyServiceHanlder struct {
logger *logrus.Entry
dbmanager db.Manager
statusCli *client.AppRuntimeSyncClient
}
@ -41,6 +43,7 @@ type ThirdPartyServiceHanlder struct {
// Create3rdPartySvcHandler creates a new *ThirdPartyServiceHanlder.
func Create3rdPartySvcHandler(dbmanager db.Manager, statusCli *client.AppRuntimeSyncClient) *ThirdPartyServiceHanlder {
return &ThirdPartyServiceHanlder{
logger: logrus.WithField("WHO", "ThirdPartyServiceHanlder"),
dbmanager: dbmanager,
statusCli: statusCli,
}
@ -61,7 +64,6 @@ func (t *ThirdPartyServiceHanlder) AddEndpoints(sid string, d *model.AddEndpiont
ServiceID: sid,
IP: address,
Port: port,
IsOnline: &d.IsOnline,
}
if err := t.dbmanager.EndpointsDao().AddModel(ep); err != nil {
return err
@ -84,7 +86,6 @@ func (t *ThirdPartyServiceHanlder) UpdEndpoints(d *model.UpdEndpiontsReq) error
ep.IP = address
ep.Port = port
}
ep.IsOnline = &d.IsOnline
if err := t.dbmanager.EndpointsDao().UpdateModel(ep); err != nil {
return err
}
@ -132,53 +133,78 @@ func (t *ThirdPartyServiceHanlder) DelEndpoints(epid, sid string) error {
}
// ListEndpoints lists third-party service endpoints.
func (t *ThirdPartyServiceHanlder) ListEndpoints(sid string) ([]*model.EndpointResp, error) {
endpoints, err := t.dbmanager.EndpointsDao().List(sid)
func (t *ThirdPartyServiceHanlder) ListEndpoints(componentID string) ([]*model.ThirdEndpoint, error) {
logger := t.logger.WithField("Method", "ListEndpoints").
WithField("ComponentID", componentID)
runtimeEndpoints, err := t.listRuntimeEndpoints(componentID)
if err != nil {
logrus.Warningf("ServiceID: %s; error listing endpoints from db; %v", sid, err)
logger.Warning(err.Error())
}
m := make(map[string]*model.EndpointResp)
for _, item := range endpoints {
ep := &model.EndpointResp{
EpID: item.UUID,
Address: func(ip string, p int) string {
if p != 0 {
return fmt.Sprintf("%s:%d", ip, p)
}
return ip
}(item.IP, item.Port),
staticEndpoints, err := t.listStaticEndpoints(componentID)
if err != nil {
staticEndpoints = map[string]*model.ThirdEndpoint{}
logger.Warning(err.Error())
}
// Merge runtimeEndpoints with staticEndpoints
for _, ep := range runtimeEndpoints {
sep, ok := staticEndpoints[ep.EpID]
if !ok {
continue
}
ep.IsStatic = sep.IsStatic
ep.Address = sep.Address
delete(staticEndpoints, ep.EpID)
}
// Add offline static endpoints
for _, ep := range staticEndpoints {
runtimeEndpoints = append(runtimeEndpoints, ep)
}
sort.Sort(model.ThirdEndpoints(runtimeEndpoints))
return runtimeEndpoints, nil
}
// listRuntimeEndpoints queries the runtime (worker) for the endpoints
// currently known for the given third-party component and converts them
// into API response objects.
func (t *ThirdPartyServiceHanlder) listRuntimeEndpoints(componentID string) ([]*model.ThirdEndpoint, error) {
	resp, err := t.statusCli.ListThirdPartyEndpoints(componentID)
	if err != nil {
		return nil, errors.Wrap(err, "list runtime third endpoints")
	}
	var endpoints []*model.ThirdEndpoint
	for _, rt := range resp.Items {
		endpoints = append(endpoints, &model.ThirdEndpoint{
			EpID:    rt.Name,
			Address: rt.Address,
			Status:  rt.Status,
		})
	}
	return endpoints, nil
}
func (t *ThirdPartyServiceHanlder) listStaticEndpoints(componentID string) (map[string]*model.ThirdEndpoint, error) {
staticEndpoints, err := t.dbmanager.EndpointsDao().List(componentID)
if err != nil {
return nil, errors.Wrap(err, "list static endpoints")
}
endpoints := make(map[string]*model.ThirdEndpoint)
for _, item := range staticEndpoints {
address := func(ip string, p int) string {
if p != 0 {
return fmt.Sprintf("%s:%d", ip, p)
}
return ip
}(item.IP, item.Port)
endpoints[item.UUID] = &model.ThirdEndpoint{
EpID: item.UUID,
Address: address,
Status: "-",
IsOnline: false,
IsStatic: true,
}
m[ep.Address] = ep
}
thirdPartyEndpoints, err := t.statusCli.ListThirdPartyEndpoints(sid)
if err != nil {
logrus.Warningf("ServiceID: %s; grpc; error listing third-party endpoints: %v", sid, err)
return nil, err
}
if thirdPartyEndpoints != nil && thirdPartyEndpoints.Obj != nil {
for _, item := range thirdPartyEndpoints.Obj {
ep := m[fmt.Sprintf("%s:%d", item.Ip, item.Port)]
if ep != nil {
ep.IsOnline = true
ep.Status = item.Status
continue
}
rep := &model.EndpointResp{
EpID: item.Uuid,
Address: item.Ip,
Status: item.Status,
IsOnline: true,
IsStatic: false,
}
m[rep.Address] = rep
}
}
var res []*model.EndpointResp
for _, item := range m {
res = append(res, item)
}
return res, nil
return endpoints, nil
}

View File

@ -236,6 +236,13 @@ func (w *resWriter) WriteHeader(statusCode int) {
// WrapEL wrap eventlog, handle event log before and after process
func WrapEL(f http.HandlerFunc, target, optType string, synType int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var serviceKind string
serviceObj := r.Context().Value(ctxutil.ContextKey("service"))
if serviceObj != nil {
service := serviceObj.(*dbmodel.TenantServices)
serviceKind = service.Kind
}
if r.Method != "GET" {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
@ -261,7 +268,7 @@ func WrapEL(f http.HandlerFunc, target, optType string, synType int) http.Handle
}
//eventLog check the latest event
if !util.CanDoEvent(optType, synType, target, targetID) {
if !util.CanDoEvent(optType, synType, target, targetID, serviceKind) {
logrus.Errorf("operation too frequently. uri: %s; target: %s; target id: %s", r.RequestURI, target, targetID)
httputil.ReturnError(r, w, 409, "操作过于频繁,请稍后再试") // status code 409 conflict
return

View File

@ -2,8 +2,9 @@ package model
import (
"fmt"
dbmodel "github.com/goodrain/rainbond/db/model"
"time"
dbmodel "github.com/goodrain/rainbond/db/model"
)
// ComponentBase -
@ -168,6 +169,7 @@ type ComponentVolume struct {
ReclaimPolicy string `json:"reclaim_policy"`
AllowExpansion bool `json:"allow_expansion"`
VolumeProviderName string `json:"volume_provider_name"`
Mode *int32 `json:"mode"`
}
// Key returns the key of ComponentVolume.
@ -192,6 +194,7 @@ func (v *ComponentVolume) DbModel(componentID string) *dbmodel.TenantServiceVolu
ReclaimPolicy: v.ReclaimPolicy,
AllowExpansion: v.AllowExpansion,
VolumeProviderName: v.VolumeProviderName,
Mode: v.Mode,
}
}
@ -239,6 +242,7 @@ type Component struct {
ComponentBase ComponentBase `json:"component_base"`
HTTPRules []AddHTTPRuleStruct `json:"http_rules"`
TCPRules []AddTCPRuleStruct `json:"tcp_rules"`
HTTPRuleConfigs []HTTPRuleConfig `json:"http_rule_configs"`
Monitors []AddServiceMonitorRequestStruct `json:"monitors"`
Ports []TenantServicesPort `json:"ports"`
Relations []TenantComponentRelation `json:"relations"`

View File

@ -19,8 +19,10 @@
package model
import (
dbmodel "github.com/goodrain/rainbond/db/model"
"strconv"
"strings"
dbmodel "github.com/goodrain/rainbond/db/model"
)
//AddHTTPRuleStruct is used to add http rule, certificate and rule extensions
@ -171,6 +173,79 @@ type Body struct {
ProxyBuffering string `json:"proxy_buffering,omitempty" validate:"proxy_buffering|required"`
}
// HTTPRuleConfig is the request body carrying the gateway (nginx proxy)
// options of a single HTTP rule, identified by RuleID. The values map
// one-to-one onto GwRuleConfig rows via DbModel.
type HTTPRuleConfig struct {
	RuleID              string       `json:"rule_id,omitempty" validate:"rule_id|required"`
	ProxyConnectTimeout int          `json:"proxy_connect_timeout,omitempty" validate:"proxy_connect_timeout|required"`
	ProxySendTimeout    int          `json:"proxy_send_timeout,omitempty" validate:"proxy_send_timeout|required"`
	ProxyReadTimeout    int          `json:"proxy_read_timeout,omitempty" validate:"proxy_read_timeout|required"`
	ProxyBodySize       int          `json:"proxy_body_size,omitempty" validate:"proxy_body_size|required"`
	SetHeaders          []*SetHeader `json:"set_headers,omitempty"`
	Rewrites            []*Rewrite   `json:"rewrite,omitempty"`
	ProxyBufferSize     int          `json:"proxy_buffer_size,omitempty" validate:"proxy_buffer_size|numeric_between:1,65535"`
	// NOTE(review): the validate tag below previously reused the field name
	// "proxy_buffer_size" (copy-paste); it now names the correct field.
	ProxyBufferNumbers int    `json:"proxy_buffer_numbers,omitempty" validate:"proxy_buffer_numbers|numeric_between:1,65535"`
	ProxyBuffering     string `json:"proxy_buffering,omitempty" validate:"proxy_buffering|required"`
}
// DbModel converts the rule config into its database representation:
// one GwRuleConfig row per nginx proxy option, plus one row per distinct
// set-header entry.
func (h *HTTPRuleConfig) DbModel() []*dbmodel.GwRuleConfig {
	options := []struct {
		key, value string
	}{
		{"proxy-connect-timeout", strconv.Itoa(h.ProxyConnectTimeout)},
		{"proxy-send-timeout", strconv.Itoa(h.ProxySendTimeout)},
		{"proxy-read-timeout", strconv.Itoa(h.ProxyReadTimeout)},
		{"proxy-body-size", strconv.Itoa(h.ProxyBodySize)},
		{"proxy-buffer-size", strconv.Itoa(h.ProxyBufferSize)},
		{"proxy-buffer-numbers", strconv.Itoa(h.ProxyBufferNumbers)},
		{"proxy-buffering", h.ProxyBuffering},
	}
	configs := make([]*dbmodel.GwRuleConfig, 0, len(options)+len(h.SetHeaders))
	for _, opt := range options {
		configs = append(configs, &dbmodel.GwRuleConfig{
			RuleID: h.RuleID,
			Key:    opt.key,
			Value:  opt.value,
		})
	}
	// Deduplicate set-headers by key; blank keys are dropped and a blank
	// value is stored as the literal string "empty".
	headers := make(map[string]string)
	for _, sh := range h.SetHeaders {
		if strings.TrimSpace(sh.Key) == "" {
			continue
		}
		if strings.TrimSpace(sh.Value) == "" {
			sh.Value = "empty"
		}
		headers["set-header-"+sh.Key] = sh.Value
	}
	for key, value := range headers {
		configs = append(configs, &dbmodel.GwRuleConfig{
			RuleID: h.RuleID,
			Key:    key,
			Value:  value,
		})
	}
	return configs
}
//SetHeader set header
type SetHeader struct {
Key string `json:"item_key"`

View File

@ -20,15 +20,13 @@ package model
// AddEndpiontsReq is one of the Endpoints in the request to add the endpints.
type AddEndpiontsReq struct {
Address string `json:"address" validate:"address|required"`
IsOnline bool `json:"is_online" validate:"required"`
Address string `json:"address" validate:"address|required"`
}
// UpdEndpiontsReq is one of the Endpoints in the request to update the endpints.
type UpdEndpiontsReq struct {
EpID string `json:"ep_id" validate:"required|len:32"`
Address string `json:"address"`
IsOnline bool `json:"is_online" validate:"required"`
EpID string `json:"ep_id" validate:"required|len:32"`
Address string `json:"address"`
}
// DelEndpiontsReq is one of the Endpoints in the request to update the endpints.
@ -36,16 +34,33 @@ type DelEndpiontsReq struct {
EpID string `json:"ep_id" validate:"required|len:32"`
}
// EndpointResp is one of the Endpoints list in the response to list, add,
// ThirdEndpoint is one of the Endpoints list in the response to list, add,
// update or delete the endpints.
type EndpointResp struct {
type ThirdEndpoint struct {
EpID string `json:"ep_id"`
Address string `json:"address"`
Status string `json:"status"`
IsOnline bool `json:"is_online"`
IsStatic bool `json:"is_static"`
}
// ThirdEndpoints is a sortable collection of *ThirdEndpoint; sorting
// orders the endpoints lexicographically by Address.
type ThirdEndpoints []*ThirdEndpoint

// Len is part of sort.Interface.
func (e ThirdEndpoints) Len() int { return len(e) }

// Swap is part of sort.Interface.
func (e ThirdEndpoints) Swap(i, j int) { e[i], e[j] = e[j], e[i] }

// Less is part of sort.Interface; it compares endpoint addresses.
func (e ThirdEndpoints) Less(i, j int) bool { return e[i].Address < e[j].Address }
// ThridPartyServiceProbe is the json obejct in the request
// to update or fetch the ThridPartyServiceProbe.
type ThridPartyServiceProbe struct {

View File

@ -60,7 +60,8 @@ type AddVolumeStruct struct {
// ReclaimPolicy 回收策略
ReclaimPolicy string `json:"reclaim_policy"`
// AllowExpansion 是否支持扩展
AllowExpansion bool `json:"allow_expansion"`
AllowExpansion bool `json:"allow_expansion"`
Mode *int32 `json:"mode"`
}
}
@ -243,6 +244,7 @@ type UpdVolumeReq struct {
VolumeType string `json:"volume_type" validate:"volume_type|required"`
FileContent string `json:"file_content"`
VolumePath string `json:"volume_path" validate:"volume_path|required"`
Mode *int32 `json:"mode"`
}
// VolumeWithStatusResp volume status

View File

@ -12,4 +12,5 @@ var (
ErrSyncOperation = newByMessage(409, 10103, "The asynchronous operation is executing")
// ErrHorizontalDueToNoChange
ErrHorizontalDueToNoChange = newByMessage(400, 10104, "The number of components has not changed, no need to scale")
ErrPodNotFound = newByMessage(404, 10105, "pod not found")
)

View File

@ -29,8 +29,8 @@ import (
)
// CanDoEvent check can do event or not
func CanDoEvent(optType string, synType int, target, targetID string) bool {
if synType == dbmodel.SYNEVENTTYPE {
func CanDoEvent(optType string, synType int, target, targetID string, componentKind string) bool {
if synType == dbmodel.SYNEVENTTYPE || componentKind == "third_party" {
return true
}
event, err := db.GetManager().ServiceEventDao().GetLastASyncEvent(target, targetID)

View File

@ -70,6 +70,7 @@ func NewExportApp(in []byte, m *exectorManager) (TaskWorker, error) {
//Run Run
func (i *ExportApp) Run(timeout time.Duration) error {
defer os.RemoveAll(i.SourceDir)
// disable Md5 checksum
// if ok := i.isLatest(); ok {
// i.updateStatus("success")

View File

@ -118,6 +118,9 @@ func (b *BackupAPPRestore) Run(timeout time.Duration) error {
if err := util.CheckAndCreateDir(cacheDir); err != nil {
return fmt.Errorf("create cache dir error %s", err.Error())
}
// delete the cache data
defer b.deleteCache(cacheDir)
b.cacheDir = cacheDir
switch backup.BackupMode {
case "full-online":
@ -180,6 +183,23 @@ func (b *BackupAPPRestore) Run(timeout time.Duration) error {
return nil
}
// deleteCache removes every regular file under dir except the application
// metadata file (console_apps_metadata.json), which must survive the
// restore; directories themselves are left in place.
func (b *BackupAPPRestore) deleteCache(dir string) error {
	logrus.Infof("delete cache %s", dir)
	walker := func(path string, info os.FileInfo, err error) error {
		switch {
		case err != nil:
			return err
		case info.IsDir():
			// keep directory structure; only files are removed
			return nil
		case strings.HasSuffix(path, "console_apps_metadata.json"):
			// keep the metadata file
			return nil
		default:
			return os.RemoveAll(path)
		}
	}
	return filepath.Walk(dir, walker)
}
func (b *BackupAPPRestore) restoreVersionAndData(backup *dbmodel.AppBackup, appSnapshot *AppSnapshot) error {
for _, app := range appSnapshot.Services {
//backup app image or code slug file

View File

@ -32,7 +32,6 @@ import (
"github.com/goodrain/rainbond/pkg/generated/clientset/versioned"
etcdutil "github.com/goodrain/rainbond/util/etcd"
k8sutil "github.com/goodrain/rainbond/util/k8s"
"github.com/goodrain/rainbond/worker/appm"
"github.com/goodrain/rainbond/worker/appm/componentdefinition"
"github.com/goodrain/rainbond/worker/appm/controller"
"github.com/goodrain/rainbond/worker/appm/store"
@ -98,15 +97,8 @@ func Run(s *option.Worker) error {
componentdefinition.NewComponentDefinitionBuilder(s.Config.RBDNamespace)
//step 4: create component resource store
startCh := channels.NewRingChannel(1024)
updateCh := channels.NewRingChannel(1024)
probeCh := channels.NewRingChannel(1024)
cachestore := store.NewStore(restConfig, clientset, rainbondClient, db.GetManager(), s.Config, startCh, probeCh)
appmController := appm.NewAPPMController(clientset, cachestore, startCh, updateCh, probeCh)
if err := appmController.Start(); err != nil {
logrus.Errorf("error starting appm controller: %v", err)
}
defer appmController.Stop()
cachestore := store.NewStore(restConfig, clientset, rainbondClient, db.GetManager(), s.Config)
if err := cachestore.Start(); err != nil {
logrus.Error("start kube cache store error", err)
return err
@ -128,7 +120,7 @@ func Run(s *option.Worker) error {
//step 7 : create discover module
garbageCollector := gc.NewGarbageCollector(clientset)
taskManager := discover.NewTaskManager(s.Config, cachestore, controllerManager, garbageCollector, startCh)
taskManager := discover.NewTaskManager(s.Config, cachestore, controllerManager, garbageCollector)
if err := taskManager.Start(); err != nil {
return err
}

View File

@ -19,7 +19,7 @@ spec:
status: {}
validation:
openAPIV3Schema:
description: HelmApp -
description: ThirdComponent -
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
@ -34,12 +34,14 @@ spec:
metadata:
type: object
spec:
description: ThirdComponentSpec -
properties:
endpointSource:
description: endpoint source config
properties:
endpoints:
items:
description: ThirdComponentEndpoint -
properties:
address:
description: The address including the port number.
@ -48,6 +50,9 @@ spec:
description: Specify a private certificate when the protocol
is HTTPS
type: string
name:
description: The name of the Endpoint.
type: string
protocol:
description: 'Address protocols, including: HTTP, TCP, UDP,
HTTPS'
@ -57,6 +62,7 @@ spec:
type: object
type: array
kubernetesService:
description: KubernetesServiceSource -
properties:
name:
type: string
@ -91,6 +97,12 @@ spec:
probe:
description: health check probe
properties:
failureThreshold:
description: Minimum consecutive failures for the probe to be considered
failed after having succeeded. Defaults to 3. Minimum value is
1.
format: int32
type: integer
httpGet:
description: HTTPGet specifies the http request to perform.
properties:
@ -116,17 +128,33 @@ spec:
description: Path to access on the HTTP server.
type: string
type: object
periodSeconds:
description: How often (in seconds) to perform the probe. Default
to 10 seconds. Minimum value is 1.
format: int32
type: integer
successThreshold:
description: Minimum consecutive successes for the probe to be considered
successful after having failed.
format: int32
type: integer
tcpSocket:
description: 'TCPSocket specifies an action involving a TCP port.
TCP hooks not yet supported TODO: implement a realistic TCP lifecycle
hook'
type: object
timeoutSeconds:
description: 'Number of seconds after which the probe times out.
Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
format: int32
type: integer
type: object
required:
- endpointSource
- ports
type: object
status:
description: ThirdComponentStatus -
properties:
endpoints:
items:
@ -135,6 +163,9 @@ spec:
address:
description: The address including the port number.
type: string
name:
description: The name of the Endpoint.
type: string
reason:
description: Reason probe not passed reason
type: string
@ -187,6 +218,7 @@ spec:
type: object
type: array
phase:
description: ComponentPhase -
type: string
reason:
type: string

View File

@ -505,6 +505,7 @@ type RuleExtensionDao interface {
GetRuleExtensionByRuleID(ruleID string) ([]*model.RuleExtension, error)
DeleteRuleExtensionByRuleID(ruleID string) error
DeleteByRuleIDs(ruleIDs []string) error
CreateOrUpdateRuleExtensionsInBatch(exts []*model.RuleExtension) error
}
// HTTPRuleDao -
@ -521,6 +522,7 @@ type HTTPRuleDao interface {
DeleteByComponentPort(componentID string, port int) error
DeleteByComponentIDs(componentIDs []string) error
CreateOrUpdateHTTPRuleInBatch(httpRules []*model.HTTPRule) error
ListByComponentIDs(componentIDs []string) ([]*model.HTTPRule, error)
}
// TCPRuleDao -
@ -545,7 +547,6 @@ type EndpointsDao interface {
GetByUUID(uuid string) (*model.Endpoint, error)
DelByUUID(uuid string) error
List(sid string) ([]*model.Endpoint, error)
ListIsOnline(sid string) ([]*model.Endpoint, error)
DeleteByServiceID(sid string) error
}
@ -566,6 +567,7 @@ type GwRuleConfigDao interface {
DeleteByRuleID(rid string) error
ListByRuleID(rid string) ([]*model.GwRuleConfig, error)
DeleteByRuleIDs(ruleIDs []string) error
CreateOrUpdateGwRuleConfigsInBatch(ruleConfigs []*model.GwRuleConfig) error
}
// TenantServceAutoscalerRulesDao -

View File

@ -89,9 +89,9 @@ type TenantPluginBuildVersion struct {
Info string `gorm:"column:info" json:"info"`
Status string `gorm:"column:status;size:24" json:"status"`
// container default cpu
ContainerCPU int `gorm:"column:container_cpu;default:125" json:"container_cpu"`
ContainerCPU int `gorm:"column:container_cpu;default:0" json:"container_cpu"`
// container default memory
ContainerMemory int `gorm:"column:container_memory;default:64" json:"container_memory"`
ContainerMemory int `gorm:"column:container_memory;default:0" json:"container_memory"`
// container args
ContainerCMD string `gorm:"column:container_cmd;size:2048" json:"container_cmd"`
}
@ -155,9 +155,9 @@ type TenantServicePluginRelation struct {
ServiceID string `gorm:"column:service_id;size:32" json:"service_id"`
PluginModel string `gorm:"column:plugin_model;size:24" json:"plugin_model"`
// container default cpu v3.5.1 add
ContainerCPU int `gorm:"column:container_cpu;default:125" json:"container_cpu"`
ContainerCPU int `gorm:"column:container_cpu;default:0" json:"container_cpu"`
// container default memory v3.5.1 add
ContainerMemory int `gorm:"column:container_memory;default:64" json:"container_memory"`
ContainerMemory int `gorm:"column:container_memory;default:0" json:"container_memory"`
Switch bool `gorm:"column:switch;default:0" json:"switch"`
}

View File

@ -165,9 +165,11 @@ type TenantServices struct {
// 服务描述
Comment string `gorm:"column:comment" json:"comment"`
// 容器CPU权重
ContainerCPU int `gorm:"column:container_cpu;default:500" json:"container_cpu"`
// default is 0, which means CPU resources are not limited
ContainerCPU int `gorm:"column:container_cpu;default:0" json:"container_cpu"`
// 容器最大内存
ContainerMemory int `gorm:"column:container_memory;default:128" json:"container_memory"`
// default is 0, which means memory resources are not limited
ContainerMemory int `gorm:"column:container_memory;default:0" json:"container_memory"`
// container GPU, The amount of video memory applied for GPU. The unit is MiB
// default is 0, That means no GPU is required
ContainerGPU int `gorm:"column:container_gpu;default:0" json:"container_gpu"`
@ -476,6 +478,7 @@ type TenantServiceVolume struct {
AllowExpansion bool `gorm:"column:allow_expansion" json:"allow_expansion"`
// VolumeProviderName 使用的存储驱动别名
VolumeProviderName string `gorm:"column:volume_provider_name" json:"volume_provider_name"`
Mode *int32 `gorm:"column:mode" json:"mode"`
}
//TableName 表名

View File

@ -18,6 +18,10 @@
package model
import (
"fmt"
)
// Endpoint is a persistent object for table 3rd_party_svc_endpoints.
type Endpoint struct {
Model
@ -25,8 +29,6 @@ type Endpoint struct {
ServiceID string `gorm:"column:service_id;size:32;not null" json:"service_id"`
IP string `gorm:"column:ip;not null" json:"ip"`
Port int `gorm:"column:port;size:65535" json:"port"`
//use pointer type, zero values won't be saved into database
IsOnline *bool `gorm:"column:is_online;default:true" json:"is_online"`
}
// TableName returns table name of Endpoint.
@ -34,6 +36,14 @@ func (Endpoint) TableName() string {
return "tenant_service_3rd_party_endpoints"
}
// GetAddress formats the endpoint as "ip" when no port is set, or
// "ip:port" otherwise.
func (e *Endpoint) GetAddress() string {
	if e.Port != 0 {
		return fmt.Sprintf("%s:%d", e.IP, e.Port)
	}
	return e.IP
}
// DiscorveryType type of service discovery center.
type DiscorveryType string

View File

@ -85,18 +85,6 @@ func (e *EndpointDaoImpl) List(sid string) ([]*model.Endpoint, error) {
return eps, nil
}
// ListIsOnline lists the endpoints of service sid that are marked online;
// a missing record is reported as (nil, nil) rather than an error.
func (e *EndpointDaoImpl) ListIsOnline(sid string) ([]*model.Endpoint, error) {
	var endpoints []*model.Endpoint
	err := e.DB.Where("service_id=? and is_online=1", sid).Find(&endpoints).Error
	if err == nil {
		return endpoints, nil
	}
	if err == gorm.ErrRecordNotFound {
		return nil, nil
	}
	return nil, err
}
// DelByUUID deletes endpoints matching uuid.
func (e *EndpointDaoImpl) DelByUUID(uuid string) error {
if err := e.DB.Where("uuid=?", uuid).Delete(&model.Endpoint{}).Error; err != nil {

View File

@ -20,9 +20,9 @@ package dao
import (
"fmt"
gormbulkups "github.com/atcdot/gorm-bulk-upsert"
"reflect"
gormbulkups "github.com/atcdot/gorm-bulk-upsert"
"github.com/goodrain/rainbond/api/util/bcode"
"github.com/goodrain/rainbond/db/model"
"github.com/jinzhu/gorm"
@ -149,6 +149,18 @@ func (c *RuleExtensionDaoImpl) DeleteByRuleIDs(ruleIDs []string) error {
return nil
}
// CreateOrUpdateRuleExtensionsInBatch upserts the given rule extensions,
// flushing to the database in batches of up to 2000 rows.
func (c *RuleExtensionDaoImpl) CreateOrUpdateRuleExtensionsInBatch(exts []*model.RuleExtension) error {
	var rows []interface{}
	for i := range exts {
		rows = append(rows, *exts[i])
	}
	err := gormbulkups.BulkUpsert(c.DB, rows, 2000)
	if err != nil {
		return errors.Wrap(err, "create or update rule extensions in batch")
	}
	return nil
}
//HTTPRuleDaoImpl http rule
type HTTPRuleDaoImpl struct {
DB *gorm.DB
@ -276,7 +288,7 @@ func (h *HTTPRuleDaoImpl) ListByCertID(certID string) ([]*model.HTTPRule, error)
}
//DeleteByComponentIDs delete http rule by component ids
func (h *HTTPRuleDaoImpl) DeleteByComponentIDs(componentIDs []string) error{
func (h *HTTPRuleDaoImpl) DeleteByComponentIDs(componentIDs []string) error {
return h.DB.Where("service_id in (?) ", componentIDs).Delete(&model.HTTPRule{}).Error
}
@ -292,6 +304,15 @@ func (h *HTTPRuleDaoImpl) CreateOrUpdateHTTPRuleInBatch(httpRules []*model.HTTPR
return nil
}
// ListByComponentIDs returns every HTTP rule bound to any of the given
// component (service) IDs.
func (h *HTTPRuleDaoImpl) ListByComponentIDs(componentIDs []string) ([]*model.HTTPRule, error) {
	var httpRules []*model.HTTPRule
	err := h.DB.Where("service_id in (?) ", componentIDs).Find(&httpRules).Error
	if err != nil {
		return nil, err
	}
	return httpRules, nil
}
// TCPRuleDaoTmpl is a implementation of TcpRuleDao
type TCPRuleDaoTmpl struct {
DB *gorm.DB
@ -407,7 +428,7 @@ func (t *TCPRuleDaoTmpl) ListByServiceID(serviceID string) ([]*model.TCPRule, er
}
//DeleteByComponentIDs delete tcp rule by component ids
func (t *TCPRuleDaoTmpl) DeleteByComponentIDs(componentIDs []string) error{
func (t *TCPRuleDaoTmpl) DeleteByComponentIDs(componentIDs []string) error {
return t.DB.Where("service_id in (?) ", componentIDs).Delete(&model.TCPRule{}).Error
}
@ -470,3 +491,15 @@ func (t *GwRuleConfigDaoImpl) DeleteByRuleIDs(ruleIDs []string) error {
}
return nil
}
// CreateOrUpdateGwRuleConfigsInBatch upserts the given gateway rule
// configs, flushing to the database in batches of up to 2000 rows.
func (t *GwRuleConfigDaoImpl) CreateOrUpdateGwRuleConfigsInBatch(ruleConfigs []*model.GwRuleConfig) error {
	var rows []interface{}
	for i := range ruleConfigs {
		rows = append(rows, *ruleConfigs[i])
	}
	err := gormbulkups.BulkUpsert(t.DB, rows, 2000)
	if err != nil {
		return errors.Wrap(err, "create or update rule configs in batch")
	}
	return nil
}

View File

@ -32,7 +32,7 @@ import (
"github.com/goodrain/rainbond/util/ingress-nginx/ingress/errors"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -74,7 +74,7 @@ func NewAnnotationExtractor(cfg resolver.Resolver) Extractor {
}
// Extract extracts the annotations from an Ingress
func (e Extractor) Extract(ing *extensions.Ingress) *Ingress {
func (e Extractor) Extract(ing *networkingv1.Ingress) *Ingress {
pia := &Ingress{
ObjectMeta: ing.ObjectMeta,
}

View File

@ -21,10 +21,11 @@ package cookie
import (
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
"strings"
)
// Config -
type Config struct {
Cookie map[string]string `json:"cookie"`
}
@ -33,11 +34,12 @@ type cookie struct {
r resolver.Resolver
}
// NewParser -
func NewParser(r resolver.Resolver) parser.IngressAnnotation {
return cookie{r}
}
func (c cookie) Parse(ing *extensions.Ingress) (interface{}, error) {
func (c cookie) Parse(ing *networkingv1.Ingress) (interface{}, error) {
co, err := parser.GetStringAnnotation("cookie", ing)
if err != nil {
return nil, err

View File

@ -20,18 +20,18 @@ package cookie
import (
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func buildIngress() *extensions.Ingress {
func buildIngress() *networkingv1.Ingress {
defaultBackend := extensions.IngressBackend{
ServiceName: "default-backend",
ServicePort: intstr.FromInt(80),
}
return &extensions.Ingress{
return &networkingv1.Ingress{
ObjectMeta: meta_v1.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,

View File

@ -21,10 +21,11 @@ package header
import (
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
"strings"
)
// Config -
type Config struct {
Header map[string]string `json:"header"`
}
@ -33,11 +34,12 @@ type header struct {
r resolver.Resolver
}
// NewParser -
func NewParser(r resolver.Resolver) parser.IngressAnnotation {
return header{r}
}
func (h header) Parse(ing *extensions.Ingress) (interface{}, error) {
func (h header) Parse(ing *networkingv1.Ingress) (interface{}, error) {
hr, err := parser.GetStringAnnotation("header", ing)
if err != nil {
return nil, err

View File

@ -20,18 +20,18 @@ package header
import (
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func buildIngress() *extensions.Ingress {
func buildIngress() *networkingv1.Ingress {
defaultBackend := extensions.IngressBackend{
ServiceName: "default-backend",
ServicePort: intstr.FromInt(80),
}
return &extensions.Ingress{
return &networkingv1.Ingress{
ObjectMeta: meta_v1.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,

View File

@ -22,9 +22,10 @@ import (
"fmt"
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
)
// Config -
type Config struct {
L4Enable bool
L4Host string
@ -35,11 +36,12 @@ type l4 struct {
r resolver.Resolver
}
// NewParser -
func NewParser(r resolver.Resolver) parser.IngressAnnotation {
return l4{r}
}
func (l l4) Parse(ing *extensions.Ingress) (interface{}, error) {
func (l l4) Parse(ing *networkingv1.Ingress) (interface{}, error) {
l4Enable, _ := parser.GetBoolAnnotation("l4-enable", ing)
l4Host, _ := parser.GetStringAnnotation("l4-host", ing)
if l4Host == "" {

View File

@ -20,18 +20,18 @@ package l4
import (
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func buildIngress() *extensions.Ingress {
func buildIngress() *networkingv1.Ingress {
defaultBackend := extensions.IngressBackend{
ServiceName: "default-backend",
ServicePort: intstr.FromInt(80),
}
return &extensions.Ingress{
return &networkingv1.Ingress{
ObjectMeta: meta_v1.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,

View File

@ -21,7 +21,7 @@ package lbtype
import (
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
)
type lbtype struct {
@ -36,6 +36,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation {
// Parse parses the annotations contained in the ingress rule
// used to indicate if the location/s contains a fragment of
// configuration to be included inside the paths of the rules
func (a lbtype) Parse(ing *extensions.Ingress) (interface{}, error) {
func (a lbtype) Parse(ing *networkingv1.Ingress) (interface{}, error) {
return parser.GetStringAnnotation("lb-type", ing)
}

View File

@ -22,8 +22,7 @@ import (
"strings"
"github.com/goodrain/rainbond/util/ingress-nginx/ingress/errors"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
)
var (
@ -33,7 +32,7 @@ var (
// IngressAnnotation has a method to parse annotations located in Ingress
type IngressAnnotation interface {
Parse(ing *extensions.Ingress) (interface{}, error)
Parse(ing *networkingv1.Ingress) (interface{}, error)
}
type ingAnnotations map[string]string
@ -70,7 +69,7 @@ func (a ingAnnotations) parseInt(name string) (int, error) {
return 0, errors.ErrMissingAnnotations
}
func checkAnnotation(name string, ing *extensions.Ingress) error {
func checkAnnotation(name string, ing *networkingv1.Ingress) error {
if ing == nil || len(ing.GetAnnotations()) == 0 {
return errors.ErrMissingAnnotations
}
@ -82,7 +81,7 @@ func checkAnnotation(name string, ing *extensions.Ingress) error {
}
// GetBoolAnnotation extracts a boolean from an Ingress annotation
func GetBoolAnnotation(name string, ing *extensions.Ingress) (bool, error) {
func GetBoolAnnotation(name string, ing *networkingv1.Ingress) (bool, error) {
v := GetAnnotationWithPrefix(name)
err := checkAnnotation(v, ing)
if err != nil {
@ -92,7 +91,7 @@ func GetBoolAnnotation(name string, ing *extensions.Ingress) (bool, error) {
}
// GetStringAnnotation extracts a string from an Ingress annotation
func GetStringAnnotation(name string, ing *extensions.Ingress) (string, error) {
func GetStringAnnotation(name string, ing *networkingv1.Ingress) (string, error) {
v := GetAnnotationWithPrefix(name)
err := checkAnnotation(v, ing)
if err != nil {
@ -102,7 +101,7 @@ func GetStringAnnotation(name string, ing *extensions.Ingress) (string, error) {
}
// GetIntAnnotation extracts an int from an Ingress annotation
func GetIntAnnotation(name string, ing *extensions.Ingress) (int, error) {
func GetIntAnnotation(name string, ing *networkingv1.Ingress) (int, error) {
v := GetAnnotationWithPrefix(name)
err := checkAnnotation(v, ing)
if err != nil {
@ -113,7 +112,7 @@ func GetIntAnnotation(name string, ing *extensions.Ingress) (int, error) {
// GetStringAnnotationWithPrefix extracts an string from an Ingress annotation
// based on the annotation prefix
func GetStringAnnotationWithPrefix(prefix string, ing *extensions.Ingress) (map[string]string, error) {
func GetStringAnnotationWithPrefix(prefix string, ing *networkingv1.Ingress) (map[string]string, error) {
v := GetAnnotationWithPrefix(prefix)
err := checkAnnotation(v, ing)
if err != nil {

View File

@ -20,12 +20,12 @@ import (
"testing"
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func buildIngress() *extensions.Ingress {
return &extensions.Ingress{
func buildIngress() *networkingv1.Ingress {
return &networkingv1.Ingress{
ObjectMeta: meta_v1.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,

View File

@ -26,7 +26,7 @@ import (
"github.com/goodrain/rainbond/gateway/controller/config"
"github.com/sirupsen/logrus"
"golang.org/x/net/http/httpguts"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
)
// Config returns the proxy timeout to use in the upstream server/s
@ -191,7 +191,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation {
// ParseAnnotations parses the annotations contained in the ingress
// rule used to configure upstream check parameters
func (a proxy) Parse(ing *extensions.Ingress) (interface{}, error) {
func (a proxy) Parse(ing *networkingv1.Ingress) (interface{}, error) {
defBackend := a.r.GetDefaultBackend()
config := &Config{}

View File

@ -20,7 +20,7 @@ import (
"testing"
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@ -29,13 +29,13 @@ import (
"github.com/goodrain/rainbond/gateway/defaults"
)
func buildIngress() *extensions.Ingress {
func buildIngress() *networkingv1.Ingress {
defaultBackend := extensions.IngressBackend{
ServiceName: "default-backend",
ServicePort: intstr.FromInt(80),
}
return &extensions.Ingress{
return &networkingv1.Ingress{
ObjectMeta: meta_v1.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,

View File

@ -22,7 +22,7 @@ import (
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
"github.com/sirupsen/logrus"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
)
// Config describes the per location redirect config
@ -85,7 +85,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation {
// ParseAnnotations parses the annotations contained in the ingress
// rule used to rewrite the defined paths
func (a rewrite) Parse(ing *extensions.Ingress) (interface{}, error) {
func (a rewrite) Parse(ing *networkingv1.Ingress) (interface{}, error) {
var err error
config := &Config{}

View File

@ -23,7 +23,7 @@ import (
"testing"
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
@ -32,13 +32,13 @@ const (
defRoute = "/demo"
)
func buildIngress() *extensions.Ingress {
func buildIngress() *networkingv1.Ingress {
defaultBackend := extensions.IngressBackend{
ServiceName: "default-backend",
ServicePort: intstr.FromInt(80),
}
return &extensions.Ingress{
return &networkingv1.Ingress{
ObjectMeta: meta_v1.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,

View File

@ -19,7 +19,7 @@ package upstreamhashby
import (
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
)
type upstreamhashby struct {
@ -34,6 +34,6 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation {
// Parse parses the annotations contained in the ingress rule
// used to indicate if the location/s contains a fragment of
// configuration to be included inside the paths of the rules
func (a upstreamhashby) Parse(ing *extensions.Ingress) (interface{}, error) {
func (a upstreamhashby) Parse(ing *networkingv1.Ingress) (interface{}, error) {
return parser.GetStringAnnotation("upstream-hash-by", ing)
}

View File

@ -22,7 +22,7 @@ import (
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/annotations/resolver"
"github.com/sirupsen/logrus"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
"strconv"
)
@ -40,7 +40,7 @@ func NewParser(r resolver.Resolver) parser.IngressAnnotation {
return weight{r}
}
func (c weight) Parse(ing *extensions.Ingress) (interface{}, error) {
func (c weight) Parse(ing *networkingv1.Ingress) (interface{}, error) {
wstr, err := parser.GetStringAnnotation("weight", ing)
var w int
if err != nil || wstr == "" {

View File

@ -22,14 +22,14 @@ import (
"fmt"
"github.com/goodrain/rainbond/util/ingress-nginx/k8s"
"k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
)
type secretIngressMap struct {
v map[string][]string
}
func (m *secretIngressMap) update(ing *v1beta1.Ingress) {
func (m *secretIngressMap) update(ing *networkingv1.Ingress) {
ingKey := k8s.MetaNamespaceKey(ing)
for _, tls := range ing.Spec.TLS {
secretKey := fmt.Sprintf("%s/%s", ing.Namespace, tls.SecretName)

View File

@ -31,13 +31,12 @@ import (
"strings"
"sync"
"github.com/goodrain/rainbond/gateway/cluster"
"github.com/eapache/channels"
"github.com/goodrain/rainbond/cmd/gateway/option"
"github.com/goodrain/rainbond/gateway/annotations"
"github.com/goodrain/rainbond/gateway/annotations/l4"
"github.com/goodrain/rainbond/gateway/annotations/rewrite"
"github.com/goodrain/rainbond/gateway/cluster"
"github.com/goodrain/rainbond/gateway/controller/config"
"github.com/goodrain/rainbond/gateway/defaults"
"github.com/goodrain/rainbond/gateway/util"
@ -47,8 +46,7 @@ import (
ik8s "github.com/goodrain/rainbond/util/ingress-nginx/k8s"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
@ -89,7 +87,7 @@ type Storer interface {
// list virtual service
ListVirtualService() ([]*v1.VirtualService, []*v1.VirtualService)
ListIngresses() []*extensions.Ingress
ListIngresses() []*networkingv1.Ingress
GetIngressAnnotations(key string) (*annotations.Ingress, error)
@ -177,7 +175,7 @@ func New(client kubernetes.Interface,
options.LabelSelector = "creator=Rainbond"
})
store.informers.Ingress = store.sharedInformer.Extensions().V1beta1().Ingresses().Informer()
store.informers.Ingress = store.sharedInformer.Networking().V1().Ingresses().Informer()
store.listers.Ingress.Store = store.informers.Ingress.GetStore()
store.informers.Service = store.sharedInformer.Core().V1().Services().Informer()
@ -191,7 +189,7 @@ func New(client kubernetes.Interface,
ingEventHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ing := obj.(*extensions.Ingress)
ing := obj.(*networkingv1.Ingress)
// updating annotations information for ingress
store.extractAnnotations(ing)
@ -212,8 +210,8 @@ func New(client kubernetes.Interface,
}
},
UpdateFunc: func(old, cur interface{}) {
oldIng := old.(*extensions.Ingress)
curIng := cur.(*extensions.Ingress)
oldIng := old.(*networkingv1.Ingress)
curIng := cur.(*networkingv1.Ingress)
// ignore the same secret as the old one
if oldIng.ResourceVersion == curIng.ResourceVersion || reflect.DeepEqual(oldIng, curIng) {
return
@ -344,7 +342,7 @@ func New(client kubernetes.Interface,
}
// checkIngress checks whether the given ing is valid.
func (s *k8sStore) checkIngress(ing *extensions.Ingress) bool {
func (s *k8sStore) checkIngress(ing *networkingv1.Ingress) bool {
i, err := l4.NewParser(s).Parse(ing)
if err != nil {
logrus.Warningf("Uxpected error with ingress: %v", err)
@ -367,7 +365,7 @@ func (s *k8sStore) checkIngress(ing *extensions.Ingress) bool {
// extractAnnotations parses ingress annotations converting the value of the
// annotation to a go struct and also information about the referenced secrets
func (s *k8sStore) extractAnnotations(ing *extensions.Ingress) {
func (s *k8sStore) extractAnnotations(ing *networkingv1.Ingress) {
key := ik8s.MetaNamespaceKey(ing)
logrus.Debugf("updating annotations information for ingress %v", key)
@ -466,7 +464,7 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
// ServerName-LocationPath -> location
srvLocMap := make(map[string]*v1.Location)
for _, item := range s.listers.Ingress.List() {
ing := item.(*extensions.Ingress)
ing := item.(*networkingv1.Ingress)
if !s.ingressIsValid(ing) {
continue
}
@ -493,8 +491,8 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
continue
}
}
svcKey := fmt.Sprintf("%v/%v", ing.Namespace, ing.Spec.Backend.ServiceName)
protocol := s.GetServiceProtocol(svcKey, ing.Spec.Backend.ServicePort.IntVal)
svcKey := fmt.Sprintf("%v/%v", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)
protocol := s.GetServiceProtocol(svcKey, ing.Spec.DefaultBackend.Service.Port.Number)
listening := fmt.Sprintf("%s:%v", host, anns.L4.L4Port)
if string(protocol) == string(v1.ProtocolUDP) {
listening = fmt.Sprintf("%s %s", listening, "udp")
@ -524,11 +522,11 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
}
vs.Namespace = anns.Namespace
vs.ServiceID = anns.Labels["service_id"]
l4PoolMap[ing.Spec.Backend.ServiceName] = struct{}{}
l4PoolMap[ing.Spec.DefaultBackend.Service.Name] = struct{}{}
l4vsMap[listening] = vs
l4vs = append(l4vs, vs)
backend := backend{name: backendName, weight: anns.Weight.Weight}
l4PoolBackendMap[ing.Spec.Backend.ServiceName] = append(l4PoolBackendMap[ing.Spec.Backend.ServiceName], backend)
l4PoolBackendMap[ing.Spec.DefaultBackend.Service.Name] = append(l4PoolBackendMap[ing.Spec.DefaultBackend.Service.Name], backend)
// endregion
} else {
// region l7
@ -593,7 +591,7 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
for _, path := range rule.IngressRuleValue.HTTP.Paths {
locKey := fmt.Sprintf("%s_%s", virSrvName, path.Path)
location := srvLocMap[locKey]
l7PoolMap[path.Backend.ServiceName] = struct{}{}
l7PoolMap[path.Backend.Service.Name] = struct{}{}
// if location do not exists, then creates a new one
if location == nil {
location = &v1.Location{
@ -631,7 +629,7 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
if anns.UpstreamHashBy != "" {
backend.hashBy = anns.UpstreamHashBy
}
l7PoolBackendMap[path.Backend.ServiceName] = append(l7PoolBackendMap[path.Backend.ServiceName], backend)
l7PoolBackendMap[path.Backend.Service.Name] = append(l7PoolBackendMap[path.Backend.Service.Name], backend)
}
}
// endregion
@ -639,7 +637,7 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
}
for _, item := range s.listers.Ingress.List() {
ing := item.(*extensions.Ingress)
ing := item.(*networkingv1.Ingress)
if !s.ingressIsValid(ing) {
continue
}
@ -701,15 +699,15 @@ func (s *k8sStore) ListVirtualService() (l7vs []*v1.VirtualService, l4vs []*v1.V
}
// ingressIsValid checks if the specified ingress is valid
func (s *k8sStore) ingressIsValid(ing *extensions.Ingress) bool {
func (s *k8sStore) ingressIsValid(ing *networkingv1.Ingress) bool {
var endpointKey string
if ing.Spec.Backend != nil { // stream
endpointKey = fmt.Sprintf("%s/%s", ing.Namespace, ing.Spec.Backend.ServiceName)
if ing.Spec.DefaultBackend != nil { // stream
endpointKey = fmt.Sprintf("%s/%s", ing.Namespace, ing.Spec.DefaultBackend.Service.Name)
} else { // http
Loop:
for _, rule := range ing.Spec.Rules {
for _, path := range rule.IngressRuleValue.HTTP.Paths {
endpointKey = fmt.Sprintf("%s/%s", ing.Namespace, path.Backend.ServiceName)
endpointKey = fmt.Sprintf("%s/%s", ing.Namespace, path.Backend.Service.Name)
if endpointKey != "" {
break Loop
}
@ -752,16 +750,16 @@ func hasReadyAddresses(endpoints *corev1.Endpoints) bool {
}
// GetIngress returns the Ingress matching key.
func (s *k8sStore) GetIngress(key string) (*extensions.Ingress, error) {
func (s *k8sStore) GetIngress(key string) (*networkingv1.Ingress, error) {
return s.listers.Ingress.ByKey(key)
}
// ListIngresses returns the list of Ingresses
func (s *k8sStore) ListIngresses() []*extensions.Ingress {
func (s *k8sStore) ListIngresses() []*networkingv1.Ingress {
// filter ingress rules
var ingresses []*extensions.Ingress
var ingresses []*networkingv1.Ingress
for _, item := range s.listers.Ingress.List() {
ing := item.(*extensions.Ingress)
ing := item.(*networkingv1.Ingress)
ingresses = append(ingresses, ing)
}
@ -803,7 +801,7 @@ func (s *k8sStore) Run(stopCh chan struct{}) {
// syncSecrets synchronizes data from all Secrets referenced by the given
// Ingress with the local store and file system.
func (s *k8sStore) syncSecrets(ing *extensions.Ingress) {
func (s *k8sStore) syncSecrets(ing *networkingv1.Ingress) {
key := ik8s.MetaNamespaceKey(ing)
for _, secrKey := range s.secretIngressMap.getSecretKeys(key) {
s.syncSecret(secrKey)
@ -895,7 +893,7 @@ func (s *k8sStore) loopUpdateIngress() {
for ipevent := range s.node.IPManager().NeedUpdateGatewayPolicy() {
ingress := s.listers.Ingress.List()
for i := range ingress {
curIng, ok := ingress[i].(*v1beta1.Ingress)
curIng, ok := ingress[i].(*networkingv1.Ingress)
if ok && curIng != nil && s.annotations.Extract(curIng).L4.L4Host == ipevent.IP.String() {
s.extractAnnotations(curIng)
s.secretIngressMap.update(curIng)

View File

@ -7,7 +7,7 @@ import (
"github.com/goodrain/rainbond/gateway/annotations/parser"
api "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -60,8 +60,8 @@ func TestRbdStore_checkIngress(t *testing.T) {
}
}
func buildIngress() *extensions.Ingress {
return &extensions.Ingress{
func buildIngress() *networkingv1.Ingress {
return &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "foobar",
Namespace: api.NamespaceDefault,

View File

@ -25,7 +25,7 @@ import (
"github.com/goodrain/rainbond/gateway/controller"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
api_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -126,23 +126,27 @@ func TestHttpDefault(t *testing.T) {
_ = ensureService(service, clientSet, t)
time.Sleep(3 * time.Second)
ingress := &extensions.Ingress{
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "default-ing",
Namespace: ns.Name,
},
Spec: extensions.IngressSpec{
Rules: []extensions.IngressRule{
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "www.http-router.com",
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/http-router",
Backend: extensions.IngressBackend{
ServiceName: "default-svc",
ServicePort: intstr.FromInt(80),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "default-svc",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
@ -229,7 +233,7 @@ func TestHttpCookie(t *testing.T) {
time.Sleep(3 * time.Second)
ingress := &extensions.Ingress{
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "router-cookie-ing",
Namespace: ns.Name,
@ -237,18 +241,22 @@ func TestHttpCookie(t *testing.T) {
parser.GetAnnotationWithPrefix("cookie"): "ck1:cv1;ck2:cv2;",
},
},
Spec: extensions.IngressSpec{
Rules: []extensions.IngressRule{
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "www.http-router.com",
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/http-router",
Backend: extensions.IngressBackend{
ServiceName: "router-cookie-svc",
ServicePort: intstr.FromInt(80),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "router-cookie-svc",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
@ -338,7 +346,7 @@ func TestHttpHeader(t *testing.T) {
time.Sleep(3 * time.Second)
ingress := &extensions.Ingress{
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "router-header-ing",
Namespace: ns.Name,
@ -346,18 +354,22 @@ func TestHttpHeader(t *testing.T) {
parser.GetAnnotationWithPrefix("header"): "hk1:hv1;hk2:hv2;",
},
},
Spec: extensions.IngressSpec{
Rules: []extensions.IngressRule{
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "www.http-router.com",
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/http-router",
Backend: extensions.IngressBackend{
ServiceName: "router-header-svc",
ServicePort: intstr.FromInt(80),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "router-header-svc",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
@ -459,7 +471,7 @@ func TestHttpUpstreamHashBy(t *testing.T) {
time.Sleep(3 * time.Second)
ingress := &extensions.Ingress{
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "upstreamhashby-ing",
Namespace: ns.Name,
@ -467,18 +479,22 @@ func TestHttpUpstreamHashBy(t *testing.T) {
parser.GetAnnotationWithPrefix("upstream-hash-by"): "$request_uri",
},
},
Spec: extensions.IngressSpec{
Rules: []extensions.IngressRule{
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "www.http-upstreamhashby.com",
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
Backend: extensions.IngressBackend{
ServiceName: "upstreamhashby-svc",
ServicePort: intstr.FromInt(80),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "upstreamhashby-svc",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
@ -555,15 +571,15 @@ func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *t
}
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) *extensions.Ingress {
func ensureIngress(ingress *networkingv1.Ingress, clientSet kubernetes.Interface, t *testing.T) *networkingv1.Ingress {
t.Helper()
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
ing, err := clientSet.NetworkingV1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Ingress %v not found, creating", ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
ing, err = clientSet.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating ingress %+v: %v", ingress, err)
}

View File

@ -27,11 +27,10 @@ import (
"github.com/goodrain/rainbond/gateway/controller"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
api_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
)
@ -174,7 +173,7 @@ func TestHttps(t *testing.T) {
Type: corev1.SecretTypeOpaque,
}, clientSet, t)
ingress := &extensions.Ingress{
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "https-ing",
Namespace: ns.Name,
@ -182,24 +181,28 @@ func TestHttps(t *testing.T) {
parser.GetAnnotationWithPrefix("force-ssl-redirect"): "true",
},
},
Spec: extensions.IngressSpec{
TLS: []v1beta1.IngressTLS{
Spec: networkingv1.IngressSpec{
TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"www.https.com"},
SecretName: secr.Name,
},
},
Rules: []extensions.IngressRule{
Rules: []networkingv1.IngressRule{
{
Host: "www.https.com",
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/https",
Backend: extensions.IngressBackend{
ServiceName: "default-svc",
ServicePort: intstr.FromInt(80),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "default-svc",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
@ -276,15 +279,15 @@ func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *t
}
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) *extensions.Ingress {
func ensureIngress(ingress *networkingv1.Ingress, clientSet kubernetes.Interface, t *testing.T) *networkingv1.Ingress {
t.Helper()
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
ing, err := clientSet.NetworkingV1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Ingress %v not found, creating", ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
ing, err = clientSet.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating ingress %+v: %v", ingress, err)
}

View File

@ -27,11 +27,10 @@ import (
"github.com/goodrain/rainbond/gateway/controller"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
api_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
)
@ -108,7 +107,7 @@ func TestTcp(t *testing.T) {
_ = ensureService(service, clientSet, t)
time.Sleep(3 * time.Second)
ingress := &extensions.Ingress{
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "tcp-ing",
Namespace: ns.Name,
@ -118,11 +117,13 @@ func TestTcp(t *testing.T) {
parser.GetAnnotationWithPrefix("l4-port"): "32145",
},
},
Spec: v1beta1.IngressSpec{
Backend: &v1beta1.IngressBackend{
ServiceName: "default-svc",
ServicePort: intstr.IntOrString{
IntVal: 30000,
Spec: networkingv1.IngressSpec{
DefaultBackend: &networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "default-svc",
Port: networkingv1.ServiceBackendPort{
Number: 30000,
},
},
},
},
@ -193,15 +194,15 @@ func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *t
return svc
}
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) *extensions.Ingress {
func ensureIngress(ingress *networkingv1.Ingress, clientSet kubernetes.Interface, t *testing.T) *networkingv1.Ingress {
t.Helper()
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
ing, err := clientSet.NetworkingV1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Ingress %v not found, creating", ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
ing, err = clientSet.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating ingress %+v: %v", ingress, err)
}

5
go.mod
View File

@ -21,6 +21,7 @@ require (
github.com/docker/cli v20.10.3+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.2+incompatible
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82
github.com/docker/go-units v0.4.0
github.com/docker/libcompose v0.4.1-0.20190808084053-143e0f3f1ab9
github.com/eapache/channels v1.1.0
@ -44,7 +45,7 @@ require (
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.4.4
github.com/golang/protobuf v1.4.3
github.com/goodrain/rainbond-oam v0.0.0-20210721020036-158e1be667dc
github.com/goodrain/rainbond-oam v0.0.0-20210810094229-f1cd639c451a
github.com/goodrain/rainbond-operator v1.3.1-0.20210401055914-f8fe4bf89a21
github.com/google/go-cmp v0.5.4 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
@ -112,6 +113,8 @@ require (
k8s.io/cli-runtime v0.20.4
k8s.io/client-go v12.0.0+incompatible
k8s.io/code-generator v0.20.4
k8s.io/component-base v0.20.4
k8s.io/kubernetes v1.13.0
sigs.k8s.io/controller-runtime v0.7.0
sigs.k8s.io/yaml v1.2.0
)

3
go.sum
View File

@ -696,6 +696,8 @@ github.com/goodrain/gorm-bulk-upsert v1.0.1-0.20210608013724-7e7870d16357 h1:kdS
github.com/goodrain/gorm-bulk-upsert v1.0.1-0.20210608013724-7e7870d16357/go.mod h1:b7/GgeVNbf/SFw4FYIWslxNV5I10C9Mhf/++3jsDk3M=
github.com/goodrain/rainbond-oam v0.0.0-20210721020036-158e1be667dc h1:hCtxb/Yy4G+wEc2n+yaXx3j4SF/s34zNI8XK5qkHqXk=
github.com/goodrain/rainbond-oam v0.0.0-20210721020036-158e1be667dc/go.mod h1:/dRehR3e1pGexOaIDjA44AHBlVPbb7v+O7GWAVyo740=
github.com/goodrain/rainbond-oam v0.0.0-20210810094229-f1cd639c451a h1:a48En+OrB5PzoOJflEEc77eCzh6mODRHkCpx+4kB2/0=
github.com/goodrain/rainbond-oam v0.0.0-20210810094229-f1cd639c451a/go.mod h1:/dRehR3e1pGexOaIDjA44AHBlVPbb7v+O7GWAVyo740=
github.com/goodrain/rainbond-operator v1.3.1-0.20210401055914-f8fe4bf89a21 h1:iCPI96slvJv88iPc1NJW8zhpkiza0kwB0jtsuZIJLRQ=
github.com/goodrain/rainbond-operator v1.3.1-0.20210401055914-f8fe4bf89a21/go.mod h1:jcQfNoxO67nkLalCmgihYrdWF82TKyuPW032tgGdqVY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -2172,6 +2174,7 @@ k8s.io/kubectl v0.18.5/go.mod h1:LAGxvYunNuwcZst0OAMXnInFIv81/IeoAz2N1Yh+AhU=
k8s.io/kubectl v0.18.6/go.mod h1:3TLzFOrF9h4mlRPAvdNkDbs5NWspN4e0EnPnEB41CGo=
k8s.io/kubectl v0.20.4 h1:Y1gUiigiZM+ulcrnWeqSHlTd0/7xWcQIXjuMnjtHyoo=
k8s.io/kubectl v0.20.4/go.mod h1:yCC5lUQyXRmmtwyxfaakryh9ezzp/bT0O14LeoFLbGo=
k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
k8s.io/metrics v0.18.5/go.mod h1:pqn6YiCCxUt067ivZVo4KtvppvdykV6HHG5+7ygVkNg=

View File

@ -21,9 +21,12 @@ package v1alpha1
import (
"fmt"
"net"
"net/url"
"strconv"
"strings"
validation "github.com/goodrain/rainbond/util/endpoint"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -35,7 +38,7 @@ func init() {
// +genclient
// +kubebuilder:object:root=true
// HelmApp -
// ThirdComponent -
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=thirdcomponents,scope=Namespaced
type ThirdComponent struct {
@ -46,6 +49,21 @@ type ThirdComponent struct {
Status ThirdComponentStatus `json:"status,omitempty"`
}
// GetComponentID -
func (in *ThirdComponent) GetComponentID() string {
return in.Name
}
// GetEndpointID -
func (in *ThirdComponent) GetEndpointID(endpoint *ThirdComponentEndpointStatus) string {
return fmt.Sprintf("%s/%s/%s", in.Namespace, in.Name, string(endpoint.Address))
}
// GetNamespaceName -
func (in *ThirdComponent) GetNamespaceName() string {
return fmt.Sprintf("%s/%s", in.Namespace, in.Name)
}
// +kubebuilder:object:root=true
// ThirdComponentList contains a list of ThirdComponent
@ -55,16 +73,31 @@ type ThirdComponentList struct {
Items []ThirdComponent `json:"items"`
}
// ThirdComponentSpec -
type ThirdComponentSpec struct {
// health check probe
// +optional
Probe *HealthProbe `json:"probe,omitempty"`
Probe *Probe `json:"probe,omitempty"`
// component regist ports
Ports []*ComponentPort `json:"ports"`
// endpoint source config
EndpointSource ThirdComponentEndpointSource `json:"endpointSource"`
}
// NeedProbe -
func (in ThirdComponentSpec) NeedProbe() bool {
if in.Probe == nil {
return false
}
return in.IsStaticEndpoints()
}
// IsStaticEndpoints -
func (in ThirdComponentSpec) IsStaticEndpoints() bool {
return len(in.EndpointSource.StaticEndpoints) > 0
}
// ThirdComponentEndpointSource -
type ThirdComponentEndpointSource struct {
StaticEndpoints []*ThirdComponentEndpoint `json:"endpoints,omitempty"`
KubernetesService *KubernetesServiceSource `json:"kubernetesService,omitempty"`
@ -75,17 +108,43 @@ type ThirdComponentEndpointSource struct {
// CustomAPISource
}
// ThirdComponentEndpoint -
type ThirdComponentEndpoint struct {
// The address including the port number.
Address string `json:"address"`
// Then Name of the Endpoint.
// +optional
Name string `json:"name"`
// Address protocols, including: HTTP, TCP, UDP, HTTPS
// +optional
Protocol string `json:"protocol,omitempty"`
// Specify a private certificate when the protocol is HTTPS
// +optional
ClentSecret string `json:"clientSecret,omitempty"`
ClientSecret string `json:"clientSecret,omitempty"`
}
// GetPort -
func (in *ThirdComponentEndpoint) GetPort() int {
arr := strings.Split(in.Address, ":")
if len(arr) != 2 {
return 0
}
port, _ := strconv.Atoi(arr[1])
return port
}
// GetIP -
func (in *ThirdComponentEndpoint) GetIP() string {
arr := strings.Split(in.Address, ":")
if len(arr) != 2 {
return ""
}
return arr[0]
}
// KubernetesServiceSource -
type KubernetesServiceSource struct {
// If not specified, the namespace is the namespace of the current resource
// +optional
@ -93,7 +152,49 @@ type KubernetesServiceSource struct {
Name string `json:"name"`
}
type HealthProbe struct {
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
// The action taken to determine the health of a container
Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
// Number of seconds after which the probe times out.
// Defaults to 1 second. Minimum value is 1.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
// How often (in seconds) to perform the probe.
// Default to 10 seconds. Minimum value is 1.
// +optional
PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
// Minimum consecutive successes for the probe to be considered successful after having failed.
// +optional
SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
// Minimum consecutive failures for the probe to be considered failed after having succeeded.
// Defaults to 3. Minimum value is 1.
// +optional
FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
}
// Equals -
func (in *Probe) Equals(target *Probe) bool {
if in.TimeoutSeconds != target.TimeoutSeconds {
return false
}
if in.PeriodSeconds != target.PeriodSeconds {
return false
}
if in.SuccessThreshold != target.SuccessThreshold {
return false
}
if in.FailureThreshold != target.FailureThreshold {
return false
}
return in.Handler.Equals(&target.Handler)
}
// Handler defines a specific action that should be taken
type Handler struct {
// HTTPGet specifies the http request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty"`
@ -104,6 +205,21 @@ type HealthProbe struct {
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty"`
}
// Equals -
func (in *Handler) Equals(target *Handler) bool {
if in == nil && target == nil {
return true
}
if in == nil || target == nil {
return false
}
if !in.HTTPGet.Equals(target.HTTPGet) {
return false
}
return in.TCPSocket.Equals(target.TCPSocket)
}
//ComponentPort component port define
type ComponentPort struct {
Name string `json:"name"`
@ -116,6 +232,11 @@ type ComponentPort struct {
type TCPSocketAction struct {
}
// Equals reports whether two TCP socket actions are equal. TCPSocketAction
// currently declares no fields, so any two values compare equal.
func (in *TCPSocketAction) Equals(target *TCPSocketAction) bool {
	return true
}
//HTTPGetAction enable http check
type HTTPGetAction struct {
// Path to access on the HTTP server.
@ -126,6 +247,38 @@ type HTTPGetAction struct {
HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty"`
}
// Equals reports whether two HTTP get actions are equivalent: both nil, or
// same path and the same multiset of HTTP headers (order-insensitive).
func (in *HTTPGetAction) Equals(target *HTTPGetAction) bool {
	if in == nil && target == nil {
		return true
	}
	if in == nil || target == nil {
		return false
	}
	if in.Path != target.Path {
		return false
	}
	if len(in.HTTPHeaders) != len(target.HTTPHeaders) {
		return false
	}
	// Count each (name, value) pair so duplicate header names are compared
	// as a multiset. The previous map of name->value collapsed duplicates,
	// letting differing header sets compare equal (e.g. [{A,1},{B,2}] vs
	// [{A,1},{A,1}] both passed the old check).
	counts := make(map[HTTPHeader]int, len(in.HTTPHeaders))
	for _, header := range in.HTTPHeaders {
		counts[header]++
	}
	for _, header := range target.HTTPHeaders {
		if counts[header] == 0 {
			return false
		}
		counts[header]--
	}
	return true
}
// HTTPHeader describes a custom header to be used in HTTP probes
type HTTPHeader struct {
// The header field name
@ -134,6 +287,7 @@ type HTTPHeader struct {
Value string `json:"value"`
}
// ComponentPhase -
type ComponentPhase string
// These are the valid statuses of pods.
@ -147,12 +301,14 @@ const (
ComponentFailed ComponentPhase = "Failed"
)
// ThirdComponentStatus -
type ThirdComponentStatus struct {
Phase ComponentPhase `json:"phase"`
Reason string `json:"reason,omitempty"`
Endpoints []*ThirdComponentEndpointStatus `json:"endpoints"`
}
// EndpointStatus -
type EndpointStatus string
const (
@ -160,11 +316,23 @@ const (
EndpointReady EndpointStatus = "Ready"
//EndpointNotReady it means the probe not passed.
EndpointNotReady EndpointStatus = "NotReady"
// EndpointUnhealthy means that the health prober failed.
EndpointUnhealthy EndpointStatus = "Unhealthy"
)
// EndpointAddress is a third-party endpoint address. It holds either an
// "ip:port" pair or a domain, optionally prefixed with http:// or https://
// (see EnsureScheme / GetPort below).
type EndpointAddress string
// GetIP returns the IP portion of the address. For a domain-style address a
// fixed placeholder IP ("1.1.1.1") is returned instead of resolving the
// domain — presumably so callers that require an IP literal still receive
// one; TODO(review): confirm downstream consumers expect this sentinel.
func (e EndpointAddress) GetIP() string {
	ip := e.getIP()
	if validation.IsDomainNotIP(ip) {
		return "1.1.1.1"
	}
	return ip
}
func (e EndpointAddress) getIP() string {
info := strings.Split(string(e), ":")
if len(info) == 2 {
return info[0]
@ -172,23 +340,62 @@ func (e EndpointAddress) GetIP() string {
return ""
}
// GetPort -
func (e EndpointAddress) GetPort() int {
info := strings.Split(string(e), ":")
if len(info) == 2 {
port, _ := strconv.Atoi(info[1])
return port
if !validation.IsDomainNotIP(e.getIP()) {
info := strings.Split(string(e), ":")
if len(info) == 2 {
port, _ := strconv.Atoi(info[1])
return port
}
return 0
}
return 0
u, err := url.Parse(e.EnsureScheme())
if err != nil {
logrus.Errorf("parse address %s: %v", e.EnsureScheme(), err)
return 0
}
logrus.Infof("url: %s; scheme: %s", e.EnsureScheme(), u.Scheme)
if u.Scheme == "https" {
return 443
}
return 80
}
// EnsureScheme returns the address guaranteed to carry an http/https scheme,
// defaulting to http:// when none is present.
func (e EndpointAddress) EnsureScheme() string {
	return ensureScheme(string(e))
}
// ensureScheme returns address unchanged when it already starts with an
// explicit http:// or https:// scheme, and prefixes "http://" (the default
// scheme) otherwise.
func ensureScheme(address string) string {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(address, scheme) {
			return address
		}
	}
	return "http://" + address
}
// NewEndpointAddress -
func NewEndpointAddress(host string, port int) *EndpointAddress {
if net.ParseIP(host) == nil {
if !validation.IsDomainNotIP(host) {
if net.ParseIP(host) == nil {
return nil
}
if port < 0 || port > 65533 {
return nil
}
ea := EndpointAddress(fmt.Sprintf("%s:%d", host, port))
return &ea
}
_, err := url.Parse(ensureScheme(host))
if err != nil {
return nil
}
if port < 0 || port > 65533 {
return nil
}
ea := EndpointAddress(fmt.Sprintf("%s:%d", host, port))
ea := EndpointAddress(host)
return &ea
}
@ -196,6 +403,9 @@ func NewEndpointAddress(host string, port int) *EndpointAddress {
type ThirdComponentEndpointStatus struct {
// The address including the port number.
Address EndpointAddress `json:"address"`
// Then Name of the Endpoint.
// +optional
Name string `json:"name"`
// Reference to object providing the endpoint.
// +optional
TargetRef *v1.ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`

View File

@ -197,7 +197,7 @@ func (in *HTTPHeader) DeepCopy() *HTTPHeader {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthProbe) DeepCopyInto(out *HealthProbe) {
func (in *Handler) DeepCopyInto(out *Handler) {
*out = *in
if in.HTTPGet != nil {
in, out := &in.HTTPGet, &out.HTTPGet
@ -211,12 +211,12 @@ func (in *HealthProbe) DeepCopyInto(out *HealthProbe) {
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthProbe.
func (in *HealthProbe) DeepCopy() *HealthProbe {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler.
func (in *Handler) DeepCopy() *Handler {
if in == nil {
return nil
}
out := new(HealthProbe)
out := new(Handler)
in.DeepCopyInto(out)
return out
}
@ -378,6 +378,22 @@ func (in *KubernetesServiceSource) DeepCopy() *KubernetesServiceSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Probe) DeepCopyInto(out *Probe) {
*out = *in
in.Handler.DeepCopyInto(&out.Handler)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
func (in *Probe) DeepCopy() *Probe {
if in == nil {
return nil
}
out := new(Probe)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Schematic) DeepCopyInto(out *Schematic) {
*out = *in
@ -543,7 +559,7 @@ func (in *ThirdComponentSpec) DeepCopyInto(out *ThirdComponentSpec) {
*out = *in
if in.Probe != nil {
in, out := &in.Probe, &out.Probe
*out = new(HealthProbe)
*out = new(Probe)
(*in).DeepCopyInto(*out)
}
if in.Ports != nil {

View File

@ -527,6 +527,7 @@ func Zip(source, target string) error {
return err
}
// UnTar -
func UnTar(archive, target string, zip bool) error {
parameter := "-x"
if zip {

View File

@ -17,7 +17,7 @@ limitations under the License.
package store
import (
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/client-go/tools/cache"
)
@ -27,7 +27,7 @@ type IngressLister struct {
}
// ByKey returns the Ingress matching key in the local Ingress store.
func (il IngressLister) ByKey(key string) (*extensions.Ingress, error) {
func (il IngressLister) ByKey(key string) (*networkingv1.Ingress, error) {
i, exists, err := il.GetByKey(key)
if err != nil {
return nil, err
@ -35,5 +35,5 @@ func (il IngressLister) ByKey(key string) (*extensions.Ingress, error) {
if !exists {
return nil, NotExistsError(key)
}
return i.(*extensions.Ingress), nil
return i.(*networkingv1.Ingress), nil
}

View File

@ -2,6 +2,7 @@ package k8s
import (
"encoding/json"
networkingv1 "k8s.io/api/networking/v1"
"net"
"os"
@ -119,3 +120,8 @@ func CreatePatch(o, n, datastruct interface{}) ([]byte, error) {
}
return strategicpatch.CreateTwoWayMergePatch(oldData, newData, datastruct)
}
// IngressPathType returns a pointer to the given networking/v1 PathType.
// Useful because HTTPIngressPath.PathType is a pointer field and Go does not
// allow taking the address of a constant directly.
func IngressPathType(pathType networkingv1.PathType) *networkingv1.PathType {
	return &pathType
}

View File

@ -1,73 +0,0 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package appm
import (
"github.com/eapache/channels"
"github.com/goodrain/rainbond/worker/appm/prober"
"github.com/goodrain/rainbond/worker/appm/store"
"github.com/goodrain/rainbond/worker/appm/thirdparty"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
)
// NewAPPMController creates a new appm controller wired to the given store
// and event channels.
func NewAPPMController(clientset kubernetes.Interface,
	store store.Storer,
	startCh *channels.RingChannel,
	updateCh *channels.RingChannel,
	probeCh *channels.RingChannel) *Controller {
	ctrl := &Controller{
		store:    store,
		startCh:  startCh,
		updateCh: updateCh,
		probeCh:  probeCh,
		stopCh:   make(chan struct{}),
	}
	// The prober must be constructed before the thirdparty watcher, which
	// receives it as a constructor argument.
	ctrl.prober = prober.NewProber(ctrl.store, ctrl.probeCh, ctrl.updateCh)
	ctrl.thirdparty = thirdparty.NewThirdPartier(clientset, ctrl.store, ctrl.startCh, ctrl.updateCh, ctrl.stopCh, ctrl.prober)
	return ctrl
}
// Controller describes a new appm controller. It wires the shared store to
// the thirdparty endpoint watcher and the health prober, and owns the
// channels used to exchange events between them (see NewAPPMController for
// the wiring).
type Controller struct {
	store      store.Storer
	thirdparty thirdparty.ThirdPartier
	prober     prober.Prober
	startCh    *channels.RingChannel // passed to the thirdparty watcher at construction
	updateCh   *channels.RingChannel // shared by the prober and the thirdparty watcher
	probeCh    *channels.RingChannel // passed to the prober at construction
	stopCh     chan struct{} // closed by Stop
}
// Start starts the appm controller: the thirdparty watcher first, then the
// prober. It always returns nil.
func (c *Controller) Start() error {
	c.thirdparty.Start()
	c.prober.Start()
	logrus.Debugf("start thirdparty appm manager success")
	return nil
}
// Stop stops the appm controller. Closing stopCh signals the thirdparty
// watcher (which received the channel at construction); the prober is
// stopped explicitly.
func (c *Controller) Stop() {
	close(c.stopCh)
	c.prober.Stop()
}

View File

@ -18,12 +18,17 @@
package componentdefinition
import "github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
//ThirdComponentProperties third component properties
type ThirdComponentProperties struct {
Kubernetes ThirdComponentKubernetes `json:"kubernetes"`
Port []*ThirdComponentPort `json:"port"`
Kubernetes *ThirdComponentKubernetes `json:"kubernetes,omitempty"`
Endpoints []*v1alpha1.ThirdComponentEndpoint `json:"endpoints,omitempty"`
Port []*ThirdComponentPort `json:"port"`
Probe *v1alpha1.Probe `json:"probe,omitempty"`
}
// ThirdComponentPort -
type ThirdComponentPort struct {
Name string `json:"name"`
Port int `json:"port"`
@ -31,6 +36,7 @@ type ThirdComponentPort struct {
OpenOuter bool `json:"openOuter"`
}
// ThirdComponentKubernetes -
type ThirdComponentKubernetes struct {
Name string `json:"name"`
Namespace string `json:"namespace"`

View File

@ -30,33 +30,43 @@ import (
rainbondversioned "github.com/goodrain/rainbond/pkg/generated/clientset/versioned"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/sirupsen/logrus"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ErrNotSupport -
var ErrNotSupport = fmt.Errorf("not support component definition")
// ErrOnlyCUESupport -
var ErrOnlyCUESupport = fmt.Errorf("component definition only support cue template")
type ComponentDefinitionBuilder struct {
// Builder -
type Builder struct {
logger *logrus.Entry
definitions map[string]*v1alpha1.ComponentDefinition
namespace string
lock sync.Mutex
}
var componentDefinitionBuilder *ComponentDefinitionBuilder
var componentDefinitionBuilder *Builder
func NewComponentDefinitionBuilder(namespace string) *ComponentDefinitionBuilder {
componentDefinitionBuilder = &ComponentDefinitionBuilder{
// NewComponentDefinitionBuilder -
func NewComponentDefinitionBuilder(namespace string) *Builder {
componentDefinitionBuilder = &Builder{
logger: logrus.WithField("WHO", "Builder"),
definitions: make(map[string]*v1alpha1.ComponentDefinition),
namespace: namespace,
}
return componentDefinitionBuilder
}
func GetComponentDefinitionBuilder() *ComponentDefinitionBuilder {
// GetComponentDefinitionBuilder -
func GetComponentDefinitionBuilder() *Builder {
return componentDefinitionBuilder
}
func (c *ComponentDefinitionBuilder) OnAdd(obj interface{}) {
// OnAdd -
func (c *Builder) OnAdd(obj interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
cd, ok := obj.(*v1alpha1.ComponentDefinition)
@ -65,7 +75,9 @@ func (c *ComponentDefinitionBuilder) OnAdd(obj interface{}) {
c.definitions[cd.Name] = cd
}
}
func (c *ComponentDefinitionBuilder) OnUpdate(oldObj, newObj interface{}) {
// OnUpdate -
func (c *Builder) OnUpdate(oldObj, newObj interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
cd, ok := newObj.(*v1alpha1.ComponentDefinition)
@ -74,7 +86,9 @@ func (c *ComponentDefinitionBuilder) OnUpdate(oldObj, newObj interface{}) {
c.definitions[cd.Name] = cd
}
}
func (c *ComponentDefinitionBuilder) OnDelete(obj interface{}) {
// OnDelete -
func (c *Builder) OnDelete(obj interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
cd, ok := obj.(*v1alpha1.ComponentDefinition)
@ -84,16 +98,18 @@ func (c *ComponentDefinitionBuilder) OnDelete(obj interface{}) {
}
}
func (c *ComponentDefinitionBuilder) GetComponentDefinition(name string) *v1alpha1.ComponentDefinition {
// GetComponentDefinition -
func (c *Builder) GetComponentDefinition(name string) *v1alpha1.ComponentDefinition {
c.lock.Lock()
defer c.lock.Unlock()
return c.definitions[name]
}
func (c *ComponentDefinitionBuilder) GetComponentProperties(as *v1.AppService, dbm db.Manager, cd *v1alpha1.ComponentDefinition) interface{} {
// GetComponentProperties -
func (c *Builder) GetComponentProperties(as *v1.AppService, dbm db.Manager, cd *v1alpha1.ComponentDefinition) interface{} {
//TODO: support custom component properties
switch cd.Name {
case thirdComponetDefineName:
case thirdComponentDefineName:
properties := &ThirdComponentProperties{}
tpsd, err := dbm.ThirdPartySvcDiscoveryCfgDao().GetByServiceID(as.ServiceID)
if err != nil {
@ -102,17 +118,24 @@ func (c *ComponentDefinitionBuilder) GetComponentProperties(as *v1.AppService, d
if tpsd != nil {
// support other source type
if tpsd.Type == dbmodel.DiscorveryTypeKubernetes.String() {
properties.Kubernetes = ThirdComponentKubernetes{
properties.Kubernetes = &ThirdComponentKubernetes{
Name: tpsd.ServiceName,
Namespace: tpsd.Namespace,
}
}
}
// static endpoints
endpoints, err := c.listStaticEndpoints(as.ServiceID)
if err != nil {
c.logger.Errorf("component id: %s; list static endpoints: %v", as.ServiceID, err)
}
properties.Endpoints = endpoints
ports, err := dbm.TenantServicesPortDao().GetPortsByServiceID(as.ServiceID)
if err != nil {
logrus.Errorf("query component %s ports failure %s", as.ServiceID, err.Error())
}
for _, port := range ports {
properties.Port = append(properties.Port, &ThirdComponentPort{
Port: port.ContainerPort,
@ -124,13 +147,38 @@ func (c *ComponentDefinitionBuilder) GetComponentProperties(as *v1.AppService, d
if properties.Port == nil {
properties.Port = []*ThirdComponentPort{}
}
// probe
probe, err := c.createProbe(as.ServiceID)
if err != nil {
c.logger.Warningf("create probe: %v", err)
}
properties.Probe = probe
return properties
default:
return nil
}
}
func (c *ComponentDefinitionBuilder) BuildWorkloadResource(as *v1.AppService, dbm db.Manager) error {
// listStaticEndpoints loads the statically configured endpoints of the
// component from the database and converts them into ThirdComponentEndpoint
// specs, using the endpoint UUID as the spec name. Returns a nil slice when
// the component has no static endpoints.
func (c *Builder) listStaticEndpoints(componentID string) ([]*v1alpha1.ThirdComponentEndpoint, error) {
	stored, err := db.GetManager().EndpointsDao().List(componentID)
	if err != nil {
		return nil, err
	}
	var converted []*v1alpha1.ThirdComponentEndpoint
	for i := range stored {
		converted = append(converted, &v1alpha1.ThirdComponentEndpoint{
			Address: stored[i].GetAddress(),
			Name:    stored[i].UUID,
		})
	}
	return converted, nil
}
// BuildWorkloadResource -
func (c *Builder) BuildWorkloadResource(as *v1.AppService, dbm db.Manager) error {
cd := c.GetComponentDefinition(as.GetComponentDefinitionName())
if cd == nil {
return ErrNotSupport
@ -153,15 +201,104 @@ func (c *ComponentDefinitionBuilder) BuildWorkloadResource(as *v1.AppService, db
//InitCoreComponentDefinition init the built-in component type definition.
//Should be called after the store is initialized.
func (c *ComponentDefinitionBuilder) InitCoreComponentDefinition(rainbondClient rainbondversioned.Interface) {
coreComponentDefinition := []*v1alpha1.ComponentDefinition{&thirdComponetDefine}
func (c *Builder) InitCoreComponentDefinition(rainbondClient rainbondversioned.Interface) {
coreComponentDefinition := []*v1alpha1.ComponentDefinition{&thirdComponentDefine}
for _, ccd := range coreComponentDefinition {
if c.GetComponentDefinition(ccd.Name) == nil {
oldCoreComponentDefinition := c.GetComponentDefinition(ccd.Name)
if oldCoreComponentDefinition == nil {
logrus.Infof("create core componentdefinition %s", ccd.Name)
if _, err := rainbondClient.RainbondV1alpha1().ComponentDefinitions(c.namespace).Create(context.Background(), ccd, metav1.CreateOptions{}); err != nil {
if _, err := rainbondClient.RainbondV1alpha1().ComponentDefinitions(c.namespace).Create(context.Background(), ccd, metav1.CreateOptions{}); err != nil && !k8sErrors.IsNotFound(err) {
logrus.Errorf("create core componentdefinition %s failire %s", ccd.Name, err.Error())
}
} else {
err := c.updateComponentDefinition(rainbondClient, oldCoreComponentDefinition, ccd)
if err != nil {
logrus.Errorf("update core componentdefinition(%s): %v", ccd.Name, err)
}
}
}
logrus.Infof("success check core componentdefinition from cluster")
}
// updateComponentDefinition replaces the in-cluster component definition with
// the bundled one when the bundled "version" annotation is strictly newer
// (or when the in-cluster one has no version). If the update hits a NotFound
// error — the definition vanished between read and update — it is created
// instead.
//
// NOTE(review): versions are compared lexicographically, so "0.10" sorts
// below "0.9"; fine while segments stay single-digit, revisit if that changes.
func (c *Builder) updateComponentDefinition(rainbondClient rainbondversioned.Interface, oldComponentDefinition, newComponentDefinition *v1alpha1.ComponentDefinition) error {
	newVersion := getComponentDefinitionVersion(newComponentDefinition)
	oldVersion := getComponentDefinitionVersion(oldComponentDefinition)
	if newVersion == "" || !(oldVersion == "" || newVersion > oldVersion) {
		return nil
	}
	logrus.Infof("update core componentdefinition %s", newComponentDefinition.Name)
	// Carry over the resource version so the apiserver's optimistic
	// concurrency check accepts the update.
	newComponentDefinition.ResourceVersion = oldComponentDefinition.ResourceVersion
	if _, err := rainbondClient.RainbondV1alpha1().ComponentDefinitions(c.namespace).Update(context.Background(), newComponentDefinition, metav1.UpdateOptions{}); err != nil {
		if k8sErrors.IsNotFound(err) {
			// Previously the original NotFound error was returned even when
			// this fallback create succeeded; return the create result instead.
			_, createErr := rainbondClient.RainbondV1alpha1().ComponentDefinitions(c.namespace).Create(context.Background(), newComponentDefinition, metav1.CreateOptions{})
			return createErr
		}
		return err
	}
	return nil
}
// getComponentDefinitionVersion returns the value of the "version"
// annotation on the component definition, or "" when the annotation (or the
// annotation map itself) is absent.
//
// Indexing a nil map is well-defined in Go and yields the zero value, so the
// explicit nil check in the original is unnecessary.
func getComponentDefinitionVersion(componentDefinition *v1alpha1.ComponentDefinition) string {
	return componentDefinition.ObjectMeta.Annotations["version"]
}
// createProbe builds a v1alpha1.Probe from the component's "readiness" probe
// stored in the database. It returns (nil, nil) when no readiness probe is
// configured.
func (c *Builder) createProbe(componentID string) (*v1alpha1.Probe, error) {
	probe, err := db.GetManager().ServiceProbeDao().GetServiceUsedProbe(componentID, "readiness")
	if err != nil {
		return nil, err
	}
	if probe == nil {
		return nil, nil
	}
	result := &v1alpha1.Probe{
		TimeoutSeconds:   int32(probe.TimeoutSecond),
		PeriodSeconds:    int32(probe.PeriodSecond),
		SuccessThreshold: int32(probe.SuccessThreshold),
		FailureThreshold: int32(probe.FailureThreshold),
	}
	// "tcp" probes get a TCP socket action; every other scheme is treated as HTTP.
	switch probe.Scheme {
	case "tcp":
		result.TCPSocket = c.createTCPGetAction(probe)
	default:
		result.HTTPGet = c.createHTTPGetAction(probe)
	}
	return result, nil
}
// createHTTPGetAction converts the stored probe configuration into an HTTP
// get action. probe.HTTPHeader is a comma-separated list of "name=value"
// pairs; a pair without "=" yields a header with an empty value.
func (c *Builder) createHTTPGetAction(probe *dbmodel.TenantServiceProbe) *v1alpha1.HTTPGetAction {
	action := &v1alpha1.HTTPGetAction{Path: probe.Path}
	if probe.HTTPHeader != "" {
		hds := strings.Split(probe.HTTPHeader, ",")
		var headers []v1alpha1.HTTPHeader
		for _, hd := range hds {
			// SplitN keeps any '=' inside the header value intact; the
			// previous strings.Split produced 3+ parts for such headers and
			// matched neither branch, silently dropping the header.
			kv := strings.SplitN(hd, "=", 2)
			header := v1alpha1.HTTPHeader{Name: kv[0]}
			if len(kv) == 2 {
				header.Value = kv[1]
			}
			headers = append(headers, header)
		}
		action.HTTPHeaders = headers
	}
	return action
}
// createTCPGetAction builds the TCP socket action for a tcp-scheme probe.
// TCPSocketAction currently declares no fields — presumably the connection
// target is derived from the endpoint address elsewhere; TODO(review) confirm.
func (c *Builder) createTCPGetAction(probe *dbmodel.TenantServiceProbe) *v1alpha1.TCPSocketAction {
	return &v1alpha1.TCPSocketAction{}
}

View File

@ -40,10 +40,16 @@ output: {
name: parameter["kubernetes"]["name"]
}
}
if parameter["endpoints"] != _|_ {
endpoints: parameter["endpoints"]
}
}
if parameter["port"] != _|_ {
ports: parameter["port"]
}
if parameter["probe"] != _|_ {
probe: parameter["probe"]
}
}
}
@ -52,24 +58,46 @@ parameter: {
namespace?: string
name: string
}
endpoints?: [...{
address: string
name?: string
protocol?: string
clientSecret?: string
}]
port?: [...{
name: string
port: >0 & <=65533
openInner: bool
openOuter: bool
}]
probe?: {
httpGet?: {
path?: string
httpHeaders?: [...{
name?: string
vale?: string
}]
}
tcpSocket?:{
}
timeoutSeconds?: >0 & <=65533
periodSeconds?: >0 & <=65533
successThreshold?: >0 & <=65533
failureThreshold?: >0 & <=65533
}
}
`
var thirdComponetDefineName = "core-thirdcomponent"
var thirdComponetDefine = v1alpha1.ComponentDefinition{
var thirdComponentDefineName = "core-thirdcomponent"
var thirdComponentDefine = v1alpha1.ComponentDefinition{
TypeMeta: v1.TypeMeta{
Kind: "ComponentDefinition",
APIVersion: "rainbond.io/v1alpha1",
},
ObjectMeta: v1.ObjectMeta{
Name: thirdComponetDefineName,
Name: thirdComponentDefineName,
Annotations: map[string]string{
"definition.oam.dev/description": "Rainbond built-in component type that defines third-party service components.",
"version": "0.2",
},
},
Spec: v1alpha1.ComponentDefinitionSpec{

View File

@ -162,7 +162,7 @@ func (s *startController) startOne(app v1.AppService) error {
if ingresses := app.GetIngress(true); ingresses != nil {
for _, ingress := range ingresses {
if len(ingress.ResourceVersion) == 0 {
_, err := s.manager.client.ExtensionsV1beta1().Ingresses(app.TenantID).Create(s.ctx, ingress, metav1.CreateOptions{})
_, err := s.manager.client.NetworkingV1().Ingresses(app.TenantID).Create(s.ctx, ingress, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("create ingress failure:%s", err.Error())
}

View File

@ -101,9 +101,6 @@ func InitAppService(dbmanager db.Manager, serviceID string, configs map[string]s
}
return appService, nil
}
if appService.IsThirdComponent() {
return appService, nil
}
for _, c := range conversionList {
if len(enableConversionList) == 0 || util.StringArrayContains(enableConversionList, c.Name) {
if err := c.Conversion(appService, dbmanager); err != nil {

View File

@ -23,15 +23,15 @@ import (
"os"
"strconv"
"strings"
"github.com/goodrain/rainbond/util/k8s"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/gateway/annotations/parser"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
@ -89,7 +89,6 @@ type AppServiceBuild struct {
appService *v1.AppService
replicationType string
dbmanager db.Manager
logger event.Logger
}
//AppServiceBuilder returns a AppServiceBuild
@ -144,7 +143,7 @@ func (a *AppServiceBuild) Build() (*v1.K8sResources, error) {
}
var services []*corev1.Service
var ingresses []*extensions.Ingress
var ingresses []*networkingv1.Ingress
var secrets []*corev1.Secret
if len(ports) > 0 {
for i := range ports {
@ -194,8 +193,8 @@ func (a *AppServiceBuild) Build() (*v1.K8sResources, error) {
// ApplyRules applies http rules and tcp rules
func (a AppServiceBuild) ApplyRules(serviceID string, containerPort, pluginContainerPort int,
service *corev1.Service) ([]*extensions.Ingress, []*corev1.Secret, error) {
var ingresses []*extensions.Ingress
service *corev1.Service) ([]*networkingv1.Ingress, []*corev1.Secret, error) {
var ingresses []*networkingv1.Ingress
var secrets []*corev1.Secret
httpRules, err := a.dbmanager.HTTPRuleDao().GetHTTPRuleByServiceIDAndContainerPort(serviceID, containerPort)
if err != nil {
@ -203,7 +202,7 @@ func (a AppServiceBuild) ApplyRules(serviceID string, containerPort, pluginConta
}
// create http ingresses
logrus.Debugf("find %d count http rule", len(httpRules))
if httpRules != nil && len(httpRules) > 0 {
if len(httpRules) > 0 {
for _, httpRule := range httpRules {
ing, sec, err := a.applyHTTPRule(httpRule, containerPort, pluginContainerPort, service)
if err != nil {
@ -222,7 +221,7 @@ func (a AppServiceBuild) ApplyRules(serviceID string, containerPort, pluginConta
if err != nil {
logrus.Infof("Can't get TCPRule corresponding to ServiceID(%s): %v", serviceID, err)
}
if tcpRules != nil && len(tcpRules) > 0 {
if len(tcpRules) > 0 {
for _, tcpRule := range tcpRules {
ing, err := a.applyTCPRule(tcpRule, service, a.tenant.UUID)
if err != nil {
@ -239,7 +238,7 @@ func (a AppServiceBuild) ApplyRules(serviceID string, containerPort, pluginConta
// applyTCPRule applies stream rule into ingress
func (a *AppServiceBuild) applyHTTPRule(rule *model.HTTPRule, containerPort, pluginContainerPort int,
service *corev1.Service) (ing *extensions.Ingress, sec *corev1.Secret, err error) {
service *corev1.Service) (ing *networkingv1.Ingress, sec *corev1.Secret, err error) {
// deal with empty path and domain
path := strings.Replace(rule.Path, " ", "", -1)
if path == "" {
@ -251,24 +250,29 @@ func (a *AppServiceBuild) applyHTTPRule(rule *model.HTTPRule, containerPort, plu
}
// create ingress
ing = &extensions.Ingress{
ing = &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: rule.UUID,
Namespace: a.tenant.UUID,
Labels: a.appService.GetCommonLabels(),
},
Spec: extensions.IngressSpec{
Rules: []extensions.IngressRule{
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: domain,
IngressRuleValue: extensions.IngressRuleValue{
HTTP: &extensions.HTTPIngressRuleValue{
Paths: []extensions.HTTPIngressPath{
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: path,
Backend: extensions.IngressBackend{
ServiceName: service.Name,
ServicePort: intstr.FromInt(pluginContainerPort),
Path: path,
PathType: k8s.IngressPathType(networkingv1.PathTypeExact),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: service.Name,
Port: networkingv1.ServiceBackendPort{
Number: int32(pluginContainerPort),
},
},
},
},
},
@ -297,10 +301,10 @@ func (a *AppServiceBuild) applyHTTPRule(rule *model.HTTPRule, containerPort, plu
if rule.CertificateID != "" {
cert, err := a.dbmanager.CertificateDao().GetCertificateByID(rule.CertificateID)
if err != nil {
return nil, nil, fmt.Errorf("Cant not get certificate by id(%s): %v", rule.CertificateID, err)
return nil, nil, fmt.Errorf("cant not get certificate by id(%s): %v", rule.CertificateID, err)
}
if cert == nil || strings.TrimSpace(cert.Certificate) == "" || strings.TrimSpace(cert.PrivateKey) == "" {
return nil, nil, fmt.Errorf("Rule id: %s; certificate not found", rule.UUID)
return nil, nil, fmt.Errorf("rule id: %s; certificate not found", rule.UUID)
}
// create secret
sec = &corev1.Secret{
@ -315,7 +319,7 @@ func (a *AppServiceBuild) applyHTTPRule(rule *model.HTTPRule, containerPort, plu
},
Type: corev1.SecretTypeOpaque,
}
ing.Spec.TLS = []extensions.IngressTLS{
ing.Spec.TLS = []networkingv1.IngressTLS{
{
Hosts: []string{domain},
SecretName: sec.Name,
@ -356,7 +360,7 @@ func (a *AppServiceBuild) applyHTTPRule(rule *model.HTTPRule, containerPort, plu
if err != nil {
return nil, nil, err
}
if configs != nil && len(configs) > 0 {
if len(configs) > 0 {
for _, cfg := range configs {
annos[parser.GetAnnotationWithPrefix(cfg.Key)] = cfg.Value
}
@ -367,18 +371,22 @@ func (a *AppServiceBuild) applyHTTPRule(rule *model.HTTPRule, containerPort, plu
}
// applyTCPRule applies stream rule into ingress
func (a *AppServiceBuild) applyTCPRule(rule *model.TCPRule, service *corev1.Service, namespace string) (ing *extensions.Ingress, err error) {
func (a *AppServiceBuild) applyTCPRule(rule *model.TCPRule, service *corev1.Service, namespace string) (ing *networkingv1.Ingress, err error) {
// create ingress
ing = &extensions.Ingress{
ing = &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: rule.UUID,
Namespace: namespace,
Labels: a.appService.GetCommonLabels(),
},
Spec: extensions.IngressSpec{
Backend: &extensions.IngressBackend{
ServiceName: service.Name,
ServicePort: intstr.FromInt(int(service.Spec.Ports[0].Port)),
Spec: networkingv1.IngressSpec{
DefaultBackend: &networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: service.Name,
Port: networkingv1.ServiceBackendPort{
Number: int32(service.Spec.Ports[0].Port),
},
},
},
},
}
@ -526,8 +534,7 @@ func (a *AppServiceBuild) createOuterService(port *model.TenantServicesPort) *co
servicePort.Name = fmt.Sprintf("%s-%d",
strings.ToLower(string(servicePort.Protocol)), port.ContainerPort)
servicePort.Port = int32(port.ContainerPort)
var portType corev1.ServiceType
portType = corev1.ServiceTypeClusterIP
portType := corev1.ServiceTypeClusterIP
spec := corev1.ServiceSpec{
Ports: []corev1.ServicePort{servicePort},
Type: portType,

View File

@ -440,7 +440,7 @@ func createPluginEnvs(pluginID, tenantID, serviceAlias string, mainEnvs []v1.Env
}
func createPluginResources(memory int, cpu int) v1.ResourceRequirements {
return createResourcesByDefaultCPU(memory, int64(cpu), int64(cpu))
return createResourcesBySetting(memory, int64(cpu), int64(cpu), 0)
}
func createTCPUDPMeshRecources(as *typesv1.AppService) v1.ResourceRequirements {
@ -458,12 +458,12 @@ func createTCPUDPMeshRecources(as *typesv1.AppService) v1.ResourceRequirements {
memory = requestint
}
}
return createResourcesByDefaultCPU(memory, cpu, func() int64 {
return createResourcesBySetting(memory, cpu, func() int64 {
if cpu < 120 {
return 120
}
return cpu
}())
}(), 0)
}
func xdsHostIPEnv(xdsHost string) corev1.EnvVar {

View File

@ -19,39 +19,34 @@
package conversion
import (
"fmt"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
//Allocate the CPU at the ratio of 4g memory to 1 core CPU
func createResourcesByDefaultCPU(memory int, setCPURequest, setCPULimit int64) corev1.ResourceRequirements {
var cpuRequest, cpuLimit int64
base := int64(memory) / 128
if base <= 0 {
base = 1
}
if memory < 512 {
cpuRequest, cpuLimit = base*30, base*80
} else if memory <= 1024 {
cpuRequest, cpuLimit = base*30, base*160
} else {
cpuRequest, cpuLimit = base*30, ((int64(memory)-1024)/1024*500 + 1280)
func createResourcesBySetting(memory int, setCPURequest, setCPULimit, setGPULimit int64) corev1.ResourceRequirements {
limits := corev1.ResourceList{}
request := corev1.ResourceList{}
if memory > 0 {
limits[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory*1024*1024), resource.BinarySI)
}
if setCPULimit > 0 {
cpuLimit = setCPULimit
limits[corev1.ResourceCPU] = *resource.NewMilliQuantity(setCPULimit, resource.DecimalSI)
}
if setGPULimit > 0 {
gpuLimit, err := resource.ParseQuantity(fmt.Sprintf("%d", setGPULimit))
if err != nil {
logrus.Errorf("gpu request is invalid")
} else {
limits[getGPULableKey()] = gpuLimit
}
}
if setCPURequest > 0 {
cpuRequest = setCPURequest
request[corev1.ResourceCPU] = *resource.NewMilliQuantity(setCPURequest, resource.DecimalSI)
}
limits := corev1.ResourceList{}
limits[corev1.ResourceCPU] = *resource.NewMilliQuantity(cpuLimit, resource.DecimalSI)
limits[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory*1024*1024), resource.BinarySI)
request := corev1.ResourceList{}
request[corev1.ResourceCPU] = *resource.NewMilliQuantity(cpuRequest, resource.DecimalSI)
request[corev1.ResourceMemory] = *resource.NewQuantity(int64(memory*1024*1024), resource.BinarySI)
return corev1.ResourceRequirements{
Limits: limits,
Requests: request,

View File

@ -38,7 +38,6 @@ import (
"github.com/jinzhu/gorm"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
@ -498,16 +497,11 @@ func createResources(as *v1.AppService) corev1.ResourceRequirements {
cpuRequest = int64(requestint)
}
}
rr := createResourcesByDefaultCPU(as.ContainerMemory, cpuRequest, cpuLimit)
// support set gpu, support application of single GPU video memory.
if as.ContainerGPU > 0 {
gpuLimit, err := resource.ParseQuantity(fmt.Sprintf("%d", as.ContainerGPU))
if err != nil {
logrus.Errorf("gpu request is invalid")
} else {
rr.Limits[getGPULableKey()] = gpuLimit
}
if as.ContainerCPU > 0 && cpuRequest == 0 && cpuLimit == 0{
cpuLimit = int64(as.ContainerCPU)
cpuRequest = int64(as.ContainerCPU)
}
rr := createResourcesBySetting(as.ContainerMemory, cpuRequest, cpuLimit, int64(as.ContainerGPU))
return rr
}

View File

@ -31,7 +31,7 @@ import (
"github.com/sirupsen/logrus"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
@ -189,11 +189,11 @@ func persistUpdate(service *corev1.Service, clientSet kubernetes.Interface) erro
return err
}
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface) {
_, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.Background(), ingress, metav1.UpdateOptions{})
func ensureIngress(ingress *networkingv1.Ingress, clientSet kubernetes.Interface) {
_, err := clientSet.NetworkingV1().Ingresses(ingress.Namespace).Update(context.Background(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.Background(), ingress, metav1.CreateOptions{})
_, err := clientSet.NetworkingV1().Ingresses(ingress.Namespace).Create(context.Background(), ingress, metav1.CreateOptions{})
if err != nil && !k8sErrors.IsAlreadyExists(err) {
logrus.Errorf("error creating ingress %+v: %v", ingress, err)
}
@ -297,12 +297,12 @@ func EnsureHPA(new *autoscalingv2.HorizontalPodAutoscaler, clientSet kubernetes.
}
}
// UpgradeIngress is used to update *extensions.Ingress.
// UpgradeIngress is used to update *networkingv1.Ingress.
func UpgradeIngress(clientset kubernetes.Interface,
as *v1.AppService,
old, new []*extensions.Ingress,
old, new []*networkingv1.Ingress,
handleErr func(msg string, err error) error) error {
var oldMap = make(map[string]*extensions.Ingress, len(old))
var oldMap = make(map[string]*networkingv1.Ingress, len(old))
for i, item := range old {
oldMap[item.Name] = old[i]
}
@ -310,7 +310,7 @@ func UpgradeIngress(clientset kubernetes.Interface,
if o, ok := oldMap[n.Name]; ok {
n.UID = o.UID
n.ResourceVersion = o.ResourceVersion
ing, err := clientset.ExtensionsV1beta1().Ingresses(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
ing, err := clientset.NetworkingV1().Ingresses(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error updating ingress: %+v: err: %v",
ing, err), err); err != nil {
@ -323,7 +323,7 @@ func UpgradeIngress(clientset kubernetes.Interface,
logrus.Debugf("ServiceID: %s; successfully update ingress: %s", as.ServiceID, ing.Name)
} else {
logrus.Debugf("ingress: %+v", n)
ing, err := clientset.ExtensionsV1beta1().Ingresses(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
ing, err := clientset.NetworkingV1().Ingresses(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error creating ingress: %+v: err: %v",
ing, err), err); err != nil {

View File

@ -1,294 +0,0 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package prober
import (
"context"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/eapache/channels"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/model"
uitlprober "github.com/goodrain/rainbond/util/prober"
v1 "github.com/goodrain/rainbond/util/prober/types/v1"
"github.com/goodrain/rainbond/worker/appm/store"
"github.com/goodrain/rainbond/worker/appm/thirdparty/discovery"
appmv1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
)
// Prober is the interface that wraps the required methods to maintain status
// about upstream servers(Endpoints) associated with a third-party service.
type Prober interface {
	// Start launches the probing engine and the event loop that reacts to
	// probe lifecycle events from the store.
	Start()
	// Stop cancels the prober's context, stopping the event loop and all
	// watcher goroutines.
	Stop()
	// UpdateProbes creates or refreshes probes for the given endpoint infos.
	UpdateProbes(info []*store.ProbeInfo)
	// StopProbe stops the probes identified by the given endpoint UUIDs.
	StopProbe(uuids []string)
	// IsUsedProbe reports whether the service identified by sid has an
	// enabled probe configured in the database.
	IsUsedProbe(sid string) bool
}
// NewProber creates a new third-party service prober.
// probeCh delivers probe lifecycle events (create/update/delete) from the
// store; updateCh is used to publish endpoint health events back to
// consumers. The prober owns a cancellable context shared with the
// underlying probing engine.
func NewProber(store store.Storer,
	probeCh *channels.RingChannel,
	updateCh *channels.RingChannel) Prober {
	ctx, cancel := context.WithCancel(context.Background())
	return &tpProbe{
		utilprober: uitlprober.NewProber(ctx, cancel),
		dbm:        db.GetManager(),
		store:      store,
		updateCh:   updateCh,
		probeCh:    probeCh,
		watcher:    make(map[string]map[string]uitlprober.Watcher),
		ctx:        ctx,
		cancel:     cancel,
	}
}
// tpProbe probes the endpoints of third-party services and reports their
// health status.
type tpProbe struct {
	utilprober uitlprober.Prober     // underlying generic probing engine
	dbm        db.Manager            // database access for probe configuration
	store      store.Storer          // runtime store of app services
	probeCh    *channels.RingChannel // incoming probe lifecycle events
	updateCh   *channels.RingChannel // outgoing endpoint health events
	ctx        context.Context
	cancel     context.CancelFunc
	// watcher maps service id -> probe name (endpoint UUID) -> health watcher.
	watcher map[string]map[string]uitlprober.Watcher
	lock    sync.Mutex // guards watcher
}
// createService converts a database probe configuration into a prober
// service definition. The resulting service is disabled unless the probe's
// IsUsed flag is set and equal to 1.
func createService(probe *model.TenantServiceProbe) *v1.Service {
	enabled := probe.IsUsed != nil && *probe.IsUsed == 1
	health := &v1.Health{
		Model:            probe.Scheme,
		TimeInterval:     probe.PeriodSecond,
		MaxErrorsNum:     probe.FailureThreshold,
		MaxTimeoutSecond: probe.TimeoutSecond,
	}
	return &v1.Service{
		Disable:       !enabled,
		ServiceHealth: health,
	}
}
// Start launches the probing engine and a goroutine that consumes probe
// lifecycle events from probeCh until the prober context is cancelled.
func (t *tpProbe) Start() {
	t.utilprober.Start()
	go func() {
		for {
			select {
			case event := <-t.probeCh.Out():
				// a nil event means the ring channel has been closed
				if event == nil {
					return
				}
				evt := event.(store.Event)
				switch evt.Type {
				case store.CreateEvent:
					infos := evt.Obj.([]*store.ProbeInfo)
					t.UpdateProbes(infos)
				case store.UpdateEvent:
					// updates are handled the same way as creations
					infos := evt.Obj.([]*store.ProbeInfo)
					t.UpdateProbes(infos)
				case store.DeleteEvent:
					uuids := evt.Obj.([]string)
					t.StopProbe(uuids)
				}
			case <-t.ctx.Done():
				return
			}
		}
	}()
}
// Stop stops prober.
// Cancelling the shared context terminates the event loop started by Start,
// the watcher goroutines, and the underlying probing engine.
func (t *tpProbe) Stop() {
	t.cancel()
}
// UpdateProbes creates or refreshes the health probes for the given endpoint
// infos. For each newly created probe it starts a watcher goroutine that
// translates health transitions into discovery events on updateCh. Endpoints
// for which no probe should exist have their probe stopped.
func (t *tpProbe) UpdateProbes(infos []*store.ProbeInfo) {
	t.lock.Lock()
	defer t.lock.Unlock()
	var services []*v1.Service
	for _, info := range infos {
		service, probeInfo := t.createServices(info)
		if service == nil {
			// no probe configuration for this endpoint: make sure any
			// previously running probe is stopped
			t.utilprober.StopProbes([]string{info.UUID})
			continue
		}
		services = append(services, service)
		// skip endpoints that already have a running watcher
		if swatchers, exist := t.watcher[service.Sid]; exist && swatchers != nil {
			if watcher, exist := swatchers[service.Name]; exist && watcher != nil {
				continue
			}
		} else {
			t.watcher[service.Sid] = make(map[string]uitlprober.Watcher)
		}
		logrus.Infof("create probe[sid: %s, address: %s, port: %d]", service.Sid, service.ServiceHealth.Address, service.ServiceHealth.Port)
		watcher := t.utilprober.WatchServiceHealthy(service.Name)
		t.utilprober.EnableWatcher(watcher.GetServiceName(), watcher.GetID())
		t.watcher[service.Sid][service.Name] = watcher
		// service and probeInfo are passed as explicit arguments: capturing
		// the loop variables in the closure would make every goroutine
		// observe the values of the final iteration.
		go func(watcher uitlprober.Watcher, info *store.ProbeInfo, service *v1.Service, probeInfo *model.TenantServiceProbe) {
			defer watcher.Close()
			defer t.utilprober.DisableWatcher(watcher.GetServiceName(), watcher.GetID())
			defer delete(t.watcher[service.Sid], service.Name)
			for {
				select {
				case event, ok := <-watcher.Watch():
					if !ok {
						return
					}
					if event == nil {
						// a nil event must not fall through to the switch
						// below (it would panic dereferencing event.Status);
						// back off and wait for the next event
						logrus.Errorf("get nil event from prober status chan, will retry")
						time.Sleep(time.Second * 3)
						continue
					}
					switch event.Status {
					case v1.StatHealthy:
						obj := &appmv1.RbdEndpoint{
							UUID: info.UUID,
							IP:   info.IP,
							Port: int(info.Port),
							Sid:  info.Sid,
						}
						t.updateCh.In() <- discovery.Event{
							Type: discovery.HealthEvent,
							Obj:  obj,
						}
					case v1.StatDeath, v1.StatUnhealthy:
						// only report unhealthy endpoints once the error
						// threshold is exceeded and the probe is configured
						// to take endpoints offline on failure
						if event.ErrorNumber > service.ServiceHealth.MaxErrorsNum {
							if probeInfo.Mode == model.OfflineFailureAction.String() {
								obj := &appmv1.RbdEndpoint{
									UUID: info.UUID,
									IP:   info.IP,
									Port: int(info.Port),
									Sid:  info.Sid,
								}
								t.updateCh.In() <- discovery.Event{
									Type: discovery.UnhealthyEvent,
									Obj:  obj,
								}
							}
						}
					}
				case <-t.ctx.Done():
					// TODO: should stop for one service, not all services.
					logrus.Infof("third app %s probe watcher exist", service.Name)
					return
				}
			}
		}(watcher, info, service, probeInfo)
	}
	//Method internally to determine if the configuration has changed
	//remove old address probe
	t.utilprober.UpdateServicesProbe(services)
}
// StopProbe stops the probes identified by the given endpoint UUIDs,
// delegating to the underlying probing engine one UUID at a time.
func (t *tpProbe) StopProbe(uuids []string) {
	for _, name := range uuids {
		t.utilprober.StopProbes([]string{name})
	}
}
// GetProbeInfo returns the enabled probe configuration associated with sid,
// or (nil, nil) when the service has no enabled probe. Database errors are
// logged and also reported as "no probe" so callers simply skip probing.
func (t *tpProbe) GetProbeInfo(sid string) (*model.TenantServiceProbe, error) {
	probes, err := t.dbm.ServiceProbeDao().GetServiceProbes(sid)
	// Guard IsUsed against nil before dereferencing: the previous code
	// dereferenced it unconditionally and panicked on records without the
	// flag set. A nil IsUsed is treated as disabled, consistent with
	// createService.
	if err != nil || len(probes) == 0 || probes[0].IsUsed == nil || *probes[0].IsUsed == 0 {
		if err != nil {
			logrus.Warningf("ServiceID: %s; error getting probes: %v", sid, err)
		}
		return nil, nil
	}
	return probes[0], nil
}
// IsUsedProbe reports whether the service identified by sid has an enabled
// probe configured in the database.
func (t *tpProbe) IsUsedProbe(sid string) bool {
	probe, _ := t.GetProbeInfo(sid)
	return probe != nil
}
// createServices builds a prober service definition for the given endpoint.
// It returns (nil, nil) when no probe should be created: the service has no
// enabled probe configuration, or the endpoint is a domain placeholder whose
// real domain cannot be recovered from the k8s service annotations.
func (t *tpProbe) createServices(probeInfo *store.ProbeInfo) (*v1.Service, *model.TenantServiceProbe) {
	// "1.1.1.1" is the sentinel address used for domain-based endpoints;
	// the real domain is kept in the k8s service annotation "domain".
	if probeInfo.IP == "1.1.1.1" {
		app := t.store.GetAppService(probeInfo.Sid)
		if len(app.GetServices(true)) >= 1 {
			appService := app.GetServices(true)[0]
			if appService.Annotations != nil && appService.Annotations["domain"] != "" {
				probeInfo.IP = appService.Annotations["domain"]
				logrus.Debugf("domain address is : %s", probeInfo.IP)
			}
		}
		// still the sentinel: the domain annotation was missing, give up
		if probeInfo.IP == "1.1.1.1" {
			logrus.Warningf("serviceID: %s, is a domain thirdpart endpoint, but do not found domain info", probeInfo.Sid)
			return nil, nil
		}
	}
	tsp, err := t.GetProbeInfo(probeInfo.Sid)
	if err != nil {
		logrus.Warningf("ServiceID: %s; Unexpected error occurred, ignore the creation of "+
			"probes: %s", probeInfo.Sid, err.Error())
		return nil, nil
	}
	if tsp == nil {
		// no enabled probe configured for this service
		return nil, nil
	}
	// liveness mode has no dedicated failure action here; treat it as ignore
	if tsp.Mode == "liveness" {
		tsp.Mode = model.IgnoreFailureAction.String()
	}
	service := createService(tsp)
	service.Sid = probeInfo.Sid
	// the endpoint UUID doubles as the probe/service name
	service.Name = probeInfo.UUID
	service.ServiceHealth.Port = int(probeInfo.Port)
	service.ServiceHealth.Name = service.Name
	address := fmt.Sprintf("%s:%d", probeInfo.IP, probeInfo.Port)
	if service.ServiceHealth.Model == "tcp" {
		// tcp probes dial directly, so resolve the host first
		address = parseTCPHostAddress(probeInfo.IP, probeInfo.Port)
	}
	service.ServiceHealth.Address = address
	return service, tsp
}
// createServiceNames derives the probe/service name for an Endpoints object
// from its "uuid" label.
func (t *tpProbe) createServiceNames(ep *corev1.Endpoints) string {
	labels := ep.GetLabels()
	return labels["uuid"]
}
// parseTCPHostAddress normalizes a TCP probe target into "host:port".
// Any http:// or https:// scheme prefix and any existing ":port" suffix are
// stripped from address first. The remaining host is then resolved via DNS
// and the first resolved address is used; when resolution fails the host is
// kept as-is. The given port is always appended, so the result is always a
// dialable "host:port" string (the previous version dropped the port on
// lookup failure, producing an undialable bare host).
func parseTCPHostAddress(address string, port int32) string {
	address = strings.TrimPrefix(address, "https://")
	address = strings.TrimPrefix(address, "http://")
	// drop any ":port" (or scheme remnant) suffix; only the host matters
	if i := strings.Index(address, ":"); i >= 0 {
		address = address[:i]
	}
	// best-effort DNS resolution; IP literals resolve to themselves
	if ns, err := net.LookupHost(address); err == nil && len(ns) != 0 {
		address = ns[0]
	}
	return fmt.Sprintf("%s:%d", address, port)
}

View File

@ -1,8 +0,0 @@
package prober
import "testing"
// TestParseTCPHostAddress verifies that parseTCPHostAddress produces a
// dialable "host:port" string. An IP literal is used so the test is
// deterministic and does not depend on external DNS (the previous version
// queried a real hostname and asserted nothing).
func TestParseTCPHostAddress(t *testing.T) {
	got := parseTCPHostAddress("http://127.0.0.1:9000", 3306)
	if got != "127.0.0.1:3306" {
		t.Fatalf("unexpected address: %s", got)
	}
}

View File

@ -24,13 +24,13 @@ import (
appsv1 "k8s.io/client-go/listers/apps/v1"
autoscalingv2 "k8s.io/client-go/listers/autoscaling/v2beta2"
corev1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/listers/extensions/v1beta1"
networkingv1 "k8s.io/client-go/listers/networking/v1"
storagev1 "k8s.io/client-go/listers/storage/v1"
)
//Lister kube-api client cache
type Lister struct {
Ingress v1beta1.IngressLister
Ingress networkingv1.IngressLister
Service corev1.ServiceLister
Secret corev1.SecretLister
StatefulSet appsv1.StatefulSetLister

View File

@ -25,7 +25,6 @@ import (
"sync"
"time"
"github.com/eapache/channels"
"github.com/goodrain/rainbond/api/util/bcode"
"github.com/goodrain/rainbond/cmd/worker/option"
"github.com/goodrain/rainbond/db"
@ -37,7 +36,6 @@ import (
k8sutil "github.com/goodrain/rainbond/util/k8s"
"github.com/goodrain/rainbond/worker/appm/componentdefinition"
"github.com/goodrain/rainbond/worker/appm/conversion"
"github.com/goodrain/rainbond/worker/appm/f"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/goodrain/rainbond/worker/server/pb"
workerutil "github.com/goodrain/rainbond/worker/util"
@ -49,7 +47,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
storagev1 "k8s.io/api/storage/v1"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
internalclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
@ -92,7 +90,6 @@ type Storer interface {
UnRegistPodUpdateListener(string)
RegisterVolumeTypeListener(string, chan<- *model.TenantServiceVolumeType)
UnRegisterVolumeTypeListener(string)
InitOneThirdPartService(service *model.TenantServices) error
GetCrds() ([]*apiextensions.CustomResourceDefinition, error)
GetCrd(name string) (*apiextensions.CustomResourceDefinition, error)
GetServiceMonitorClient() (*versioned.Clientset, error)
@ -125,14 +122,6 @@ type Event struct {
Obj interface{}
}
// ProbeInfo holds the context of a probe.
type ProbeInfo struct {
Sid string `json:"sid"`
UUID string `json:"uuid"`
IP string `json:"ip"`
Port int32 `json:"port"`
}
//appRuntimeStore app runtime store
//cache all kubernetes object and appservice
type appRuntimeStore struct {
@ -150,7 +139,6 @@ type appRuntimeStore struct {
appCount int32
dbmanager db.Manager
conf option.Config
startCh *channels.RingChannel
stopch chan struct{}
podUpdateListeners map[string]chan<- *corev1.Pod
podUpdateListenerLock sync.Mutex
@ -165,9 +153,7 @@ func NewStore(
clientset kubernetes.Interface,
rainbondClient rainbondversioned.Interface,
dbmanager db.Manager,
conf option.Config,
startCh *channels.RingChannel,
probeCh *channels.RingChannel) Storer {
conf option.Config) Storer {
ctx, cancel := context.WithCancel(context.Background())
store := &appRuntimeStore{
kubeconfig: kubeconfig,
@ -181,7 +167,6 @@ func NewStore(
conf: conf,
dbmanager: dbmanager,
crClients: make(map[string]interface{}),
startCh: startCh,
resourceCache: NewResourceCache(),
podUpdateListeners: make(map[string]chan<- *corev1.Pod, 1),
volumeTypeListeners: make(map[string]chan<- *model.TenantServiceVolumeType, 1),
@ -222,7 +207,7 @@ func NewStore(
store.listers.ConfigMap = infFactory.Core().V1().ConfigMaps().Lister()
store.informers.Ingress = infFactory.Extensions().V1beta1().Ingresses().Informer()
store.listers.Ingress = infFactory.Extensions().V1beta1().Ingresses().Lister()
store.listers.Ingress = infFactory.Networking().V1().Ingresses().Lister()
store.informers.ReplicaSet = infFactory.Apps().V1().ReplicaSets().Informer()
store.listers.ReplicaSets = infFactory.Apps().V1().ReplicaSets().Lister()
@ -255,9 +240,6 @@ func NewStore(
store.informers.ComponentDefinition = rainbondInformer.Rainbond().V1alpha1().ComponentDefinitions().Informer()
store.informers.ComponentDefinition.AddEventHandlerWithResyncPeriod(componentdefinition.GetComponentDefinitionBuilder(), time.Second*300)
isThirdParty := func(ep *corev1.Endpoints) bool {
return ep.Labels["service-kind"] == model.ServiceKindThirdParty.String()
}
// Endpoint Event Handler
epEventHandler := cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
@ -273,15 +255,6 @@ func NewStore(
}
if appservice != nil {
appservice.AddEndpoints(ep)
if isThirdParty(ep) && ep.Subsets != nil && len(ep.Subsets) > 0 {
logrus.Debugf("received add endpoints: %+v", ep)
probeInfos := listProbeInfos(ep, serviceID)
probeCh.In() <- Event{
Type: CreateEvent,
Obj: probeInfos,
}
}
return
}
}
},
@ -298,17 +271,6 @@ func NewStore(
logrus.Debugf("ServiceID: %s; Action: DeleteFunc;service is closed", serviceID)
store.DeleteAppService(appservice)
}
if isThirdParty(ep) {
logrus.Debugf("received delete endpoints: %+v", ep)
var uuids []string
for _, item := range ep.Subsets {
uuids = append(uuids, item.Ports[0].Name)
}
probeCh.In() <- Event{
Type: DeleteEvent,
Obj: uuids,
}
}
}
}
},
@ -325,13 +287,6 @@ func NewStore(
}
if appservice != nil {
appservice.AddEndpoints(cep)
if isThirdParty(cep) {
curInfos := listProbeInfos(cep, serviceID)
probeCh.In() <- Event{
Type: UpdateEvent,
Obj: curInfos,
}
}
}
}
},
@ -365,51 +320,6 @@ func (a *appRuntimeStore) Lister() *Lister {
return a.listers
}
func listProbeInfos(ep *corev1.Endpoints, sid string) []*ProbeInfo {
var probeInfos []*ProbeInfo
addProbe := func(pi *ProbeInfo) {
for _, c := range probeInfos {
if c.IP == pi.IP && c.Port == pi.Port {
return
}
}
probeInfos = append(probeInfos, pi)
}
for _, subset := range ep.Subsets {
for _, port := range subset.Ports {
if ep.Annotations != nil {
if domain, ok := ep.Annotations["domain"]; ok && domain != "" {
logrus.Debugf("thirdpart service[sid: %s] add domain endpoint[domain: %s] probe", sid, domain)
probeInfos = []*ProbeInfo{{
Sid: sid,
UUID: fmt.Sprintf("%s_%d", domain, port.Port),
IP: domain,
Port: port.Port,
}}
return probeInfos
}
}
for _, address := range subset.NotReadyAddresses {
addProbe(&ProbeInfo{
Sid: sid,
UUID: fmt.Sprintf("%s_%d", address.IP, port.Port),
IP: address.IP,
Port: port.Port,
})
}
for _, address := range subset.Addresses {
addProbe(&ProbeInfo{
Sid: sid,
UUID: fmt.Sprintf("%s_%d", address.IP, port.Port),
IP: address.IP,
Port: port.Port,
})
}
}
}
return probeInfos
}
func (a *appRuntimeStore) init() error {
//init leader namespace
leaderNamespace := a.conf.LeaderElectionNamespace
@ -442,64 +352,11 @@ func (a *appRuntimeStore) Start() error {
// init core componentdefinition
componentdefinition.GetComponentDefinitionBuilder().InitCoreComponentDefinition(a.rainbondClient)
go func() {
a.initThirdPartyService()
a.initCustomResourceInformer(stopch)
}()
return nil
}
func (a *appRuntimeStore) initThirdPartyService() error {
logrus.Debugf("begin initializing third-party services.")
// TODO: list third party services that have open ports directly.
svcs, err := a.dbmanager.TenantServiceDao().ListThirdPartyServices()
if err != nil {
logrus.Errorf("error listing third-party services: %v", err)
return err
}
for _, svc := range svcs {
disCfg, _ := a.dbmanager.ThirdPartySvcDiscoveryCfgDao().GetByServiceID(svc.ServiceID)
if disCfg != nil && disCfg.Type == "kubernetes" {
continue
}
if err = a.InitOneThirdPartService(svc); err != nil {
logrus.Errorf("init thridpart service error: %v", err)
return err
}
a.startCh.In() <- &v1.Event{
Type: v1.StartEvent, // TODO: no need to distinguish between event types.
Sid: svc.ServiceID,
}
}
logrus.Infof("initializing third-party services success")
return nil
}
// InitOneThirdPartService init one thridpart service
func (a *appRuntimeStore) InitOneThirdPartService(service *model.TenantServices) error {
// ignore service without open port.
if !a.dbmanager.TenantServicesPortDao().HasOpenPort(service.ServiceID) {
return nil
}
appService, err := conversion.InitCacheAppService(a.dbmanager, service.ServiceID, "Rainbond")
if err != nil {
logrus.Errorf("error initializing cache app service: %v", err)
return err
}
if appService.IsCustomComponent() {
return nil
}
a.RegistAppService(appService)
err = f.ApplyOne(context.Background(), nil, a.clientset, appService)
if err != nil {
logrus.Errorf("error applying rule: %v", err)
return err
}
logrus.Infof("init third app %s kubernetes resource", appService.ServiceAlias)
return nil
}
//Ready if all kube informers is syncd, store is ready
func (a *appRuntimeStore) Ready() bool {
return a.informers.Ready()
@ -609,7 +466,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
}
}
}
if ingress, ok := obj.(*extensions.Ingress); ok {
if ingress, ok := obj.(*networkingv1.Ingress); ok {
serviceID := ingress.Labels["service_id"]
version := ingress.Labels["version"]
createrID := ingress.Labels["creater_id"]
@ -722,8 +579,8 @@ func (a *appRuntimeStore) getAppService(serviceID, version, createrID string, cr
}
func (a *appRuntimeStore) OnUpdate(oldObj, newObj interface{}) {
// ingress update maybe change owner component
if ingress, ok := newObj.(*extensions.Ingress); ok {
oldIngress := oldObj.(*extensions.Ingress)
if ingress, ok := newObj.(*networkingv1.Ingress); ok {
oldIngress := oldObj.(*networkingv1.Ingress)
if oldIngress.Labels["service_id"] != ingress.Labels["service_id"] {
logrus.Infof("ingress %s change owner component", oldIngress.Name)
serviceID := oldIngress.Labels["service_id"]
@ -832,7 +689,7 @@ func (a *appRuntimeStore) OnDeletes(objs ...interface{}) {
}
}
}
if ingress, ok := obj.(*extensions.Ingress); ok {
if ingress, ok := obj.(*networkingv1.Ingress); ok {
serviceID := ingress.Labels["service_id"]
version := ingress.Labels["version"]
createrID := ingress.Labels["creater_id"]

View File

@ -1,606 +0,0 @@
// RAINBOND, Application Management Platform
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package thirdparty
import (
"context"
"fmt"
"github.com/eapache/channels"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/model"
validation "github.com/goodrain/rainbond/util/endpoint"
"github.com/goodrain/rainbond/worker/appm/f"
"github.com/goodrain/rainbond/worker/appm/prober"
"github.com/goodrain/rainbond/worker/appm/store"
"github.com/goodrain/rainbond/worker/appm/thirdparty/discovery"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// ThirdPartier is the interface that wraps the required methods to update status
// about upstream servers(Endpoints) associated with a third-party service.
type ThirdPartier interface {
	// Start launches the goroutines that consume service start/stop events
	// and endpoint discovery updates, reconciling kubernetes Endpoints.
	Start()
}
// NewThirdPartier creates a new ThirdPartier.
// startCh delivers service start/stop events, updateCh delivers endpoint
// discovery updates, and stopCh terminates both event loops.
func NewThirdPartier(clientset kubernetes.Interface,
	store store.Storer,
	startCh *channels.RingChannel,
	updateCh *channels.RingChannel,
	stopCh chan struct{},
	prober prober.Prober) ThirdPartier {
	t := &thirdparty{
		clientset: clientset,
		store:     store,
		svcStopCh: make(map[string]chan struct{}),
		startCh:   startCh,
		updateCh:  updateCh,
		stopCh:    stopCh,
		prober:    prober,
	}
	return t
}
// thirdparty reconciles kubernetes Endpoints objects for third-party
// services based on start/stop and discovery events.
type thirdparty struct {
	clientset kubernetes.Interface
	store     store.Storer
	prober    prober.Prober
	// a collection of stop channel for every service.
	svcStopCh map[string]chan struct{}
	startCh   *channels.RingChannel // service start/stop events
	updateCh  *channels.RingChannel // endpoint discovery updates
	stopCh    chan struct{}         // global shutdown signal
}
// Start starts receiving event that update k8s endpoints status from start channel(startCh).
// One goroutine consumes discovery updates from updateCh, another consumes
// service start/stop events from startCh; both exit when stopCh closes,
// closing every per-service stop channel on the way out.
func (t *thirdparty) Start() {
	// discovery update loop
	go func() {
		for {
			select {
			case event := <-t.updateCh.Out():
				devent, ok := event.(discovery.Event)
				if !ok {
					logrus.Warningf("Unexpected event received %+v", event)
					continue
				}
				t.runUpdate(devent)
			case <-t.stopCh:
				for _, stopCh := range t.svcStopCh {
					close(stopCh)
				}
				return
			}
		}
	}()
	// service start/stop loop
	go func() {
		for {
			select {
			case event := <-t.startCh.Out():
				evt, ok := event.(*v1.Event)
				if !ok {
					logrus.Warningf("Unexpected event received %+v", event)
					continue
				}
				logrus.Debugf("Received event: %+v", evt)
				if evt.Type == v1.StartEvent { // no need to distinguish between event types
					needWatch := false
					stopCh := t.svcStopCh[evt.Sid]
					if stopCh == nil {
						// first start for this service: also begin watching
						// its dynamic endpoints. (The previous message here
						// said "already started", the opposite of this branch.)
						logrus.Debugf("ServiceID: %s; starting third-party service.", evt.Sid)
						needWatch = true
						t.svcStopCh[evt.Sid] = make(chan struct{})
					}
					go t.runStart(evt.Sid, needWatch)
				}
				if evt.Type == v1.StopEvent {
					stopCh := t.svcStopCh[evt.Sid]
					if stopCh == nil {
						logrus.Warningf("ServiceID: %s; The third-party service has not started yet, can't be stopped", evt.Sid)
						continue
					}
					t.runDelete(evt.Sid)
					close(stopCh)
					delete(t.svcStopCh, evt.Sid)
				}
			case <-t.stopCh:
				for _, stopCh := range t.svcStopCh {
					close(stopCh)
				}
				return
			}
		}
	}()
}
// runStart creates/updates the kubernetes Endpoints and Services for the
// third-party service sid, retrying up to three times. When needWatch is
// true and the service has a dynamic discovery backend, a watch is started
// after the first successful sync.
func (t *thirdparty) runStart(sid string, needWatch bool) {
	as := t.store.GetAppService(sid)
	if as == nil {
		logrus.Warnf("get app service from store failure, sid=%s", sid)
		return
	}
	var err error
	// NOTE(review): err is only assigned by k8sEndpoints; if every attempt
	// takes the empty-endpoints branch, the final log prints a nil error.
	for i := 3; i > 0; i-- {
		rbdeps, ir := t.ListRbdEndpoints(sid)
		if rbdeps == nil || len(rbdeps) == 0 {
			logrus.Warningf("ServiceID: %s;Empty rbd endpoints, stop starting third-party service.", sid)
			continue
		}
		var eps []*corev1.Endpoints
		eps, err = t.k8sEndpoints(as, rbdeps)
		if err != nil {
			logrus.Warningf("ServiceID: %s; error creating k8s endpoints: %s", sid, err.Error())
			continue
		}
		// apply Endpoints and Services; individual failures are logged but
		// do not abort the sync
		for _, ep := range eps {
			if err := f.EnsureEndpoints(ep, t.clientset); err != nil {
				logrus.Errorf("create or update endpoint %s failure %s", ep.Name, err.Error())
			}
		}
		for _, service := range as.GetServices(true) {
			if err := f.EnsureService(service, t.clientset); err != nil {
				logrus.Errorf("create or update service %s failure %s", service.Name, err.Error())
			}
		}
		if needWatch && ir != nil {
			ir.Watch()
		}
		logrus.Infof("ServiceID: %s; successfully running start task", sid)
		return
	}
	logrus.Errorf("ServiceID: %s; error running start task: %v", sid, err)
}
// ListRbdEndpoints lists all rbd endpoints, include static and dynamic.
// The returned Interacter is the dynamic discovery backend, or nil when the
// service has no dynamic discovery configuration.
func (t *thirdparty) ListRbdEndpoints(sid string) ([]*v1.RbdEndpoint, Interacter) {
	var endpoints []*v1.RbdEndpoint
	// endpoints configured statically in the database
	static := NewStaticInteracter(sid)
	staticList, err := static.List()
	if err != nil {
		logrus.Warningf("ServiceID: %s;error listing static rbd endpoints: %v", sid, err)
	}
	endpoints = append(endpoints, staticList...)
	// endpoints discovered through a dynamic backend, when configured
	dynamic := NewDynamicInteracter(sid, t.updateCh, t.stopCh)
	if dynamic != nil {
		dynamicList, derr := dynamic.List()
		if derr != nil {
			logrus.Warningf("ServiceID: %s;error listing dynamic rbd endpoints: %v", sid, derr)
		}
		endpoints = append(endpoints, dynamicList...)
	}
	return endpoints, dynamic
}
// deleteSubset removes the endpoint subset identified by rbdep.UUID (the
// subset's first port name) from all of the app's Endpoints, and drops the
// "domain" annotation from the app's Services when the endpoint being
// removed was a domain placeholder (sentinel address "1.1.1.1").
func deleteSubset(as *v1.AppService, rbdep *v1.RbdEndpoint) {
	eps := as.GetEndpoints(true)
	for _, ep := range eps {
		for idx, item := range ep.Subsets {
			if item.Ports[0].Name == rbdep.UUID {
				logrus.Debugf("UUID: %s; subset deleted", rbdep.UUID)
				// swap-remove: move the last subset into idx and shrink.
				// NOTE(review): mutating ep.Subsets inside this range means
				// the element swapped into idx is never re-examined —
				// confirm a UUID can match at most one subset per Endpoints.
				ep.Subsets[idx] = ep.Subsets[len(ep.Subsets)-1]
				ep.Subsets = ep.Subsets[:len(ep.Subsets)-1]
			}
			// "1.1.1.1" marks a domain-based endpoint address
			isDomain := false
			for _, addr := range item.Addresses {
				if addr.IP == "1.1.1.1" {
					isDomain = true
				}
			}
			for _, addr := range item.NotReadyAddresses {
				if addr.IP == "1.1.1.1" {
					isDomain = true
				}
			}
			// drop the recorded domain when it matches the removed endpoint
			if isDomain {
				for _, service := range as.GetServices(true) {
					if service.Annotations != nil {
						if rbdep.IP == service.Annotations["domain"] {
							delete(service.Annotations, "domain")
						}
					}
				}
			}
		}
	}
}
// k8sEndpoints builds the kubernetes Endpoints objects for a third-party
// service from the given rbd endpoint list. One Endpoints object is created
// per exposed direction (inner and/or outer) of the service's single port,
// and all of them share the same subsets. Domain endpoints are represented
// with the sentinel address "1.1.1.1", the real domain being stored on the
// first Service's "domain" annotation. When the service has an enabled
// probe, addresses start out in NotReadyAddresses until probed healthy.
func (t *thirdparty) k8sEndpoints(as *v1.AppService, epinfo []*v1.RbdEndpoint) ([]*corev1.Endpoints, error) {
	ports, err := db.GetManager().TenantServicesPortDao().GetPortsByServiceID(as.ServiceID)
	if err != nil {
		return nil, err
	}
	// third-party service can only have one port
	if len(ports) == 0 {
		return nil, fmt.Errorf("port not found")
	}
	p := ports[0]
	var res []*corev1.Endpoints
	if *p.IsInnerService {
		ep := &corev1.Endpoints{}
		ep.Namespace = as.TenantID
		// inner or outer
		if *p.IsInnerService {
			// default name follows the inner service convention unless a
			// custom k8s service name is configured
			ep.Name = fmt.Sprintf("service-%d-%d", p.ID, p.ContainerPort)
			if p.K8sServiceName != "" {
				ep.Name = p.K8sServiceName
			}
			ep.Labels = as.GetCommonLabels(map[string]string{
				"name":         as.ServiceAlias + "Service",
				"service-kind": model.ServiceKindThirdParty.String(),
			})
		}
		res = append(res, ep)
	}
	if *p.IsOuterService {
		ep := &corev1.Endpoints{}
		ep.Namespace = as.TenantID
		// inner or outer
		if *p.IsOuterService {
			ep.Name = fmt.Sprintf("service-%d-%dout", p.ID, p.ContainerPort)
			ep.Labels = as.GetCommonLabels(map[string]string{
				"name":         as.ServiceAlias + "ServiceOUT",
				"service-kind": model.ServiceKindThirdParty.String(),
			})
		}
		res = append(res, ep)
	}
	var subsets []corev1.EndpointSubset
	for _, epi := range epinfo {
		logrus.Debugf("make endpoints[address: %s] subset", epi.IP)
		subset := corev1.EndpointSubset{
			Ports: []corev1.EndpointPort{
				{
					// the endpoint UUID identifies the subset's port
					Name: epi.UUID,
					// prefer the endpoint's own port; fall back to the
					// service's container port
					Port: func(targetPort int, realPort int) int32 {
						if realPort == 0 {
							return int32(targetPort)
						}
						return int32(realPort)
					}(p.ContainerPort, epi.Port),
					Protocol: corev1.ProtocolTCP,
				},
			},
		}
		eaddressIP := epi.IP
		address := validation.SplitEndpointAddress(epi.IP)
		if validation.IsDomainNotIP(address) {
			// record the domain on the first Service and substitute the
			// sentinel address (Endpoints addresses must be IPs)
			if len(as.GetServices(false)) > 0 {
				annotations := as.GetServices(false)[0].Annotations
				if annotations == nil {
					annotations = make(map[string]string)
				}
				annotations["domain"] = epi.IP
				as.GetServices(false)[0].Annotations = annotations
			}
			eaddressIP = "1.1.1.1"
		}
		eaddress := []corev1.EndpointAddress{
			{
				IP: eaddressIP,
			},
		}
		useProbe := t.prober.IsUsedProbe(as.ServiceID)
		if useProbe {
			subset.NotReadyAddresses = eaddress
		} else {
			subset.Addresses = eaddress
		}
		subsets = append(subsets, subset)
	}
	//all endpoint for one third app is same
	for _, item := range res {
		item.Subsets = subsets
	}
	return res, nil
}
// createSubsetForAllEndpoint adds the endpoint rbdep to every Endpoints
// object of the app service as. When a single-port subset with a matching
// port already exists, the new address is merged into it (into
// NotReadyAddresses when the service has an enabled probe, otherwise into
// Addresses); otherwise a new subset is appended. Domain endpoints are
// stored with the sentinel address "1.1.1.1" and the real domain on the
// first Service's "domain" annotation. The resulting Endpoints are applied
// to the cluster; apply failures are logged, not returned.
func (t *thirdparty) createSubsetForAllEndpoint(as *v1.AppService, rbdep *v1.RbdEndpoint) error {
	port, err := db.GetManager().TenantServicesPortDao().GetPortsByServiceID(as.ServiceID)
	if err != nil {
		return err
	}
	// third-party service can only have one port
	if port == nil || len(port) == 0 {
		return fmt.Errorf("Port not found")
	}
	ipAddress := rbdep.IP
	address := validation.SplitEndpointAddress(rbdep.IP)
	if validation.IsDomainNotIP(address) {
		//domain endpoint set ip is 1.1.1.1
		ipAddress = "1.1.1.1"
		if len(as.GetServices(false)) > 0 {
			annotations := as.GetServices(false)[0].Annotations
			if annotations == nil {
				annotations = make(map[string]string)
			}
			annotations["domain"] = rbdep.IP
			as.GetServices(false)[0].Annotations = annotations
		}
	}
	subset := corev1.EndpointSubset{
		Ports: []corev1.EndpointPort{
			{
				Name: rbdep.UUID,
				Port: func() int32 {
					//if endpoint have port, will ues this port
					//or use service port
					if rbdep.Port != 0 {
						return int32(rbdep.Port)
					}
					return int32(port[0].ContainerPort)
				}(),
				Protocol: corev1.ProtocolTCP,
			},
		},
	}
	eaddress := []corev1.EndpointAddress{
		{
			IP: ipAddress,
		},
	}
	useProbe := t.prober.IsUsedProbe(as.ServiceID)
	if useProbe {
		subset.NotReadyAddresses = eaddress
	} else {
		subset.Addresses = eaddress
	}
	for _, ep := range as.GetEndpoints(true) {
		existPort := false
		existAddress := false
		for i, item := range ep.Subsets {
			for _, port := range item.Ports {
				if port.Port == int32(subset.Ports[0].Port) && len(item.Ports) < 2 {
					// check whether this address is already present, ready
					// or not
					for _, a := range item.Addresses {
						if a.IP == ipAddress {
							existAddress = true
							break
						}
					}
					for _, a := range item.NotReadyAddresses {
						if a.IP == ipAddress {
							existAddress = true
							break
						}
					}
					if !existAddress {
						if useProbe {
							ep.Subsets[i].NotReadyAddresses = append(ep.Subsets[i].NotReadyAddresses, subset.NotReadyAddresses...)
						} else {
							// bug fix: append the new ready address to the
							// existing Addresses list. The previous code
							// appended to NotReadyAddresses and assigned the
							// result to Addresses, dropping existing ready
							// addresses and duplicating not-ready ones.
							ep.Subsets[i].Addresses = append(ep.Subsets[i].Addresses, subset.Addresses...)
						}
					}
					existPort = true
				}
			}
		}
		if !existPort {
			ep.Subsets = append(ep.Subsets, subset)
		}
		if err := f.EnsureEndpoints(ep, t.clientset); err != nil {
			logrus.Errorf("update endpoint %s failure %s", ep.Name, err.Error())
		}
	}
	return nil
}
// runUpdate reacts to an endpoint discovery event (create/update/delete/
// health/unhealthy) for a third-party service and synchronizes the
// corresponding kubernetes Endpoints (and, on create/update, Service)
// objects through the cluster client.
func (t *thirdparty) runUpdate(event discovery.Event) {

	// updateAddress moves a single address between the ready
	// (Addresses) and not-ready (NotReadyAddresses) sets of the
	// matching subset, then applies the Endpoints object if changed.
	updateAddress := func(as *v1.AppService, rbdep *v1.RbdEndpoint, ready bool) {
		ad := validation.SplitEndpointAddress(rbdep.IP)
		for _, ep := range as.GetEndpoints(true) {
			var needUpdate bool
			for idx, subset := range ep.Subsets {
				for _, port := range subset.Ports {
					// search the set the address would currently be in:
					// it is in NotReadyAddresses iff we want to mark it ready.
					address := subset.Addresses
					if ready {
						address = subset.NotReadyAddresses
					}
					for i, addr := range address {
						ipequal := fmt.Sprintf("%s_%d", addr.IP, port.Port) == fmt.Sprintf("%s_%d", rbdep.IP, rbdep.Port)
						if (addr.IP == "1.1.1.1" && validation.IsDomainNotIP(ad)) || ipequal {
							// domain endpoints are stored under the placeholder IP 1.1.1.1
							if validation.IsDomainNotIP(ad) {
								rbdep.IP = "1.1.1.1"
							}
							ep.Subsets[idx] = updateSubsetAddress(ready, subset, address[i])
							needUpdate = true
							break
						}
					}
					logrus.Debugf("not found need update address by %s", fmt.Sprintf("%s_%d", rbdep.IP, rbdep.Port))
				}
			}
			if needUpdate {
				if err := f.EnsureEndpoints(ep, t.clientset); err != nil {
					logrus.Errorf("update endpoint %s failure %s", ep.Name, err.Error())
				}
			}
		}
	}

	// removeAddress deletes the event's address from every Endpoints
	// object of the app service.
	// do not have multiple ports, multiple addresses
	removeAddress := func(as *v1.AppService, rbdep *v1.RbdEndpoint) {
		ad := validation.SplitEndpointAddress(rbdep.IP)
		for _, ep := range as.GetEndpoints(true) {
			var needUpdate bool
			var newSubsets []corev1.EndpointSubset
			for idx, subset := range ep.Subsets {
				var handleSubset bool
				for i, port := range subset.Ports {
					address := append(subset.Addresses, subset.NotReadyAddresses...)
					for j, addr := range address {
						ipequal := fmt.Sprintf("%s_%d", addr.IP, port.Port) == fmt.Sprintf("%s_%d", rbdep.IP, rbdep.Port)
						if (addr.IP == "1.1.1.1" && validation.IsDomainNotIP(ad)) || ipequal {
							//multiple port remove port, Instead remove the address
							if len(subset.Ports) > 1 {
								// BUGFIX: remove port i; the previous code
								// appended subset.Ports[:i] twice, duplicating
								// the prefix instead of deleting the port.
								subset.Ports = append(subset.Ports[:i], subset.Ports[i+1:]...)
								newSubsets = append(newSubsets, subset)
							} else {
								if validation.IsDomainNotIP(ad) {
									rbdep.IP = "1.1.1.1"
								}
								newsub := removeSubsetAddress(ep.Subsets[idx], address[j])
								// drop subsets that became completely empty
								if len(newsub.Addresses) != 0 || len(newsub.NotReadyAddresses) != 0 {
									newSubsets = append(newSubsets, newsub)
								}
							}
							needUpdate = true
							handleSubset = true
							break
						}
					}
				}
				if !handleSubset {
					newSubsets = append(newSubsets, subset)
				}
			}
			ep.Subsets = newSubsets
			if needUpdate {
				if err := f.EnsureEndpoints(ep, t.clientset); err != nil {
					logrus.Errorf("update endpoint %s failure %s", ep.Name, err.Error())
				}
			}
		}
	}

	rbdep := event.Obj.(*v1.RbdEndpoint)
	if rbdep == nil {
		logrus.Warning("update event obj transfer to *v1.RbdEndpoint failure")
		return
	}
	as := t.store.GetAppService(rbdep.Sid)
	if as == nil {
		logrus.Warnf("get app service from store failure, sid=%s", rbdep.Sid)
		return
	}
	//rbdep.IP may be set "1.1.1.1" if it is domain
	//so cache doamin address for show after handle complete
	showEndpointIP := rbdep.IP
	switch event.Type {
	case discovery.UpdateEvent, discovery.CreateEvent:
		err := t.createSubsetForAllEndpoint(as, rbdep)
		if err != nil {
			logrus.Warningf("ServiceID: %s; error adding subset: %s",
				rbdep.Sid, err.Error())
			return
		}
		for _, service := range as.GetServices(true) {
			if err := f.EnsureService(service, t.clientset); err != nil {
				logrus.Errorf("create or update service %s failure %s", service.Name, err.Error())
			}
		}
		logrus.Debugf("upgrade endpoints and service for third app %s", as.ServiceAlias)
	case discovery.DeleteEvent:
		removeAddress(as, rbdep)
		logrus.Debugf("third endpoint %s ip %s is deleted", rbdep.UUID, showEndpointIP)
	case discovery.HealthEvent:
		updateAddress(as, rbdep, true)
		logrus.Debugf("third endpoint %s ip %s is onlined", rbdep.UUID, showEndpointIP)
	case discovery.UnhealthyEvent:
		logrus.Debugf("third endpoint %s ip %s is offlined", rbdep.UUID, showEndpointIP)
		updateAddress(as, rbdep, false)
	}
}
// runDelete deletes every kubernetes Endpoints object that belongs to
// the third-party service identified by sid.
func (t *thirdparty) runDelete(sid string) {
	as := t.store.GetAppService(sid) // TODO: need to delete?
	// GetAppService may return nil when the service was already removed
	// from the store (other call sites in this file nil-check it too);
	// guard to avoid a nil-pointer panic on as.GetEndpoints.
	if as == nil {
		logrus.Warnf("get app service from store failure, sid=%s", sid)
		return
	}
	if eps := as.GetEndpoints(true); eps != nil {
		for _, ep := range eps {
			logrus.Debugf("Endpoints delete: %+v", ep)
			err := t.clientset.CoreV1().Endpoints(as.TenantID).Delete(context.Background(), ep.Name, metav1.DeleteOptions{})
			// a missing object means the desired state is already reached
			if err != nil && !errors.IsNotFound(err) {
				logrus.Warningf("error deleting endpoint empty old app endpoints: %v", err)
			}
		}
	}
}
// updateSubsetAddress moves address into the ready set (Addresses) when
// ready is true, or into the not-ready set (NotReadyAddresses) when ready
// is false: the address is removed from the opposite set and appended to
// the target set unless an entry with the same IP already exists there.
// The updated subset is returned.
//
// BUGFIX: the previous implementation deleted elements while ranging over
// the same slice (append(s[:i], s[i+1:]...) inside the loop), which can
// skip or re-read shifted elements when the IP appears more than once.
// Filtering in place avoids that.
func updateSubsetAddress(ready bool, subset corev1.EndpointSubset, address corev1.EndpointAddress) corev1.EndpointSubset {
	// removeIP filters out every entry with the given IP (in place).
	removeIP := func(addresses []corev1.EndpointAddress, ip string) []corev1.EndpointAddress {
		kept := addresses[:0]
		for _, a := range addresses {
			if a.IP != ip {
				kept = append(kept, a)
			}
		}
		return kept
	}
	// containsIP reports whether an entry with the given IP exists.
	containsIP := func(addresses []corev1.EndpointAddress, ip string) bool {
		for _, a := range addresses {
			if a.IP == ip {
				return true
			}
		}
		return false
	}
	if ready {
		subset.NotReadyAddresses = removeIP(subset.NotReadyAddresses, address.IP)
		if !containsIP(subset.Addresses, address.IP) {
			subset.Addresses = append(subset.Addresses, address)
		}
	} else {
		subset.Addresses = removeIP(subset.Addresses, address.IP)
		if !containsIP(subset.NotReadyAddresses, address.IP) {
			subset.NotReadyAddresses = append(subset.NotReadyAddresses, address)
		}
	}
	return subset
}
// removeSubsetAddress removes every entry whose IP matches address.IP from
// both the ready and not-ready address sets of subset and returns the
// updated subset.
//
// BUGFIX: the previous implementation deleted elements while ranging over
// the same slice, which can skip or re-read shifted elements when the IP
// appears more than once. Filtering in place avoids that.
func removeSubsetAddress(subset corev1.EndpointSubset, address corev1.EndpointAddress) corev1.EndpointSubset {
	// filterIP drops every entry with the given IP (in place).
	filterIP := func(addresses []corev1.EndpointAddress, ip string) []corev1.EndpointAddress {
		kept := addresses[:0]
		for _, a := range addresses {
			if a.IP != ip {
				kept = append(kept, a)
			}
		}
		return kept
	}
	subset.Addresses = filterIP(subset.Addresses, address.IP)
	subset.NotReadyAddresses = filterIP(subset.NotReadyAddresses, address.IP)
	return subset
}
// isHealthy reports whether the subset has at least one ready address.
func isHealthy(subset corev1.EndpointSubset) bool {
	// len on a nil slice is 0, so the former `!= nil` check was redundant
	// (staticcheck S1009); a direct boolean return replaces the
	// if-true/return-false pattern (S1008).
	return len(subset.Addresses) > 0
}

View File

@ -24,20 +24,19 @@ import (
"strconv"
"strings"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/db/model"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
)
// EventType type of event
@ -112,26 +111,29 @@ func (a AppServiceBase) GetComponentDefinitionName() string {
if strings.HasPrefix(a.ServiceKind.String(), dbmodel.ServiceKindCustom.String()) {
return strings.Replace(a.ServiceKind.String(), dbmodel.ServiceKindCustom.String(), "", 1)
}
if a.discoveryCfg != nil && a.discoveryCfg.Type == dbmodel.DiscorveryTypeKubernetes.String() {
if a.ServiceKind == model.ServiceKindThirdParty {
return "core-thirdcomponent"
}
return ""
}
// IsCustomComponent -
func (a AppServiceBase) IsCustomComponent() bool {
if strings.HasPrefix(a.ServiceKind.String(), dbmodel.ServiceKindCustom.String()) {
return true
}
if a.discoveryCfg != nil && a.discoveryCfg.Type == dbmodel.DiscorveryTypeKubernetes.String() {
if a.ServiceKind == model.ServiceKindThirdParty {
return true
}
return false
}
// IsThirdComponent -
func (a AppServiceBase) IsThirdComponent() bool {
return a.ServiceKind.String() == dbmodel.ServiceKindThirdParty.String()
}
// SetDiscoveryCfg -
func (a *AppServiceBase) SetDiscoveryCfg(discoveryCfg *dbmodel.ThirdPartySvcDiscoveryCfg) {
a.discoveryCfg = discoveryCfg
}
@ -150,8 +152,8 @@ type AppService struct {
delServices []*corev1.Service
endpoints []*corev1.Endpoints
configMaps []*corev1.ConfigMap
ingresses []*extensions.Ingress
delIngs []*extensions.Ingress // ingresses which need to be deleted
ingresses []*networkingv1.Ingress
delIngs []*networkingv1.Ingress // ingresses which need to be deleted
secrets []*corev1.Secret
delSecrets []*corev1.Secret // secrets which need to be deleted
pods []*corev1.Pod
@ -406,9 +408,9 @@ func (a *AppService) DelEndpoints(ep *corev1.Endpoints) {
}
//GetIngress get ingress
func (a *AppService) GetIngress(canCopy bool) []*extensions.Ingress {
func (a *AppService) GetIngress(canCopy bool) []*networkingv1.Ingress {
if canCopy {
cr := make([]*extensions.Ingress, len(a.ingresses))
cr := make([]*networkingv1.Ingress, len(a.ingresses))
copy(cr, a.ingresses[0:])
return cr
}
@ -416,12 +418,12 @@ func (a *AppService) GetIngress(canCopy bool) []*extensions.Ingress {
}
//GetDelIngs gets delIngs which need to be deleted
func (a *AppService) GetDelIngs() []*extensions.Ingress {
func (a *AppService) GetDelIngs() []*networkingv1.Ingress {
return a.delIngs
}
//SetIngress set kubernetes ingress model
func (a *AppService) SetIngress(d *extensions.Ingress) {
func (a *AppService) SetIngress(d *networkingv1.Ingress) {
if len(a.ingresses) > 0 {
for i, ingress := range a.ingresses {
if ingress.GetName() == d.GetName() {
@ -434,12 +436,12 @@ func (a *AppService) SetIngress(d *extensions.Ingress) {
}
// SetIngresses sets k8s ingress list
func (a *AppService) SetIngresses(i []*extensions.Ingress) {
func (a *AppService) SetIngresses(i []*networkingv1.Ingress) {
a.ingresses = i
}
//DeleteIngress delete kubernetes ingress model
func (a *AppService) DeleteIngress(d *extensions.Ingress) {
func (a *AppService) DeleteIngress(d *networkingv1.Ingress) {
for i, c := range a.ingresses {
if c.GetName() == d.GetName() {
a.ingresses = append(a.ingresses[0:i], a.ingresses[i+1:]...)
@ -838,7 +840,7 @@ func (a *AppService) GetManifests() []*unstructured.Unstructured {
return a.manifests
}
//GetManifests get component custom manifest
//SetManifests get component custom manifest
func (a *AppService) SetManifests(manifests []*unstructured.Unstructured) {
a.manifests = manifests
}
@ -876,7 +878,7 @@ func (a *AppService) String() string {
a.statefulset,
a.deployment,
len(a.pods),
func(ing []*extensions.Ingress) string {
func(ing []*networkingv1.Ingress) string {
result := ""
for _, i := range ing {
result += i.Name + ","
@ -911,7 +913,7 @@ type TenantResource struct {
type K8sResources struct {
Services []*corev1.Service
Secrets []*corev1.Secret
Ingresses []*extensions.Ingress
Ingresses []*networkingv1.Ingress
}
//GetTCPMeshImageName get tcp mesh image name

View File

@ -67,7 +67,7 @@ func (v *ConfigFileVolume) CreateVolume(define *Define) error {
}
cmap.Data[path.Base(v.svm.VolumePath)] = util.ParseVariable(cf.FileContent, configs)
v.as.SetConfigMap(cmap)
define.SetVolumeCMap(cmap, path.Base(v.svm.VolumePath), v.svm.VolumePath, false)
define.SetVolumeCMap(cmap, path.Base(v.svm.VolumePath), v.svm.VolumePath, false, v.svm.Mode)
return nil
}
@ -77,7 +77,7 @@ func (v *ConfigFileVolume) CreateDependVolume(define *Define) error {
for _, env := range v.envs {
configs[env.Name] = env.Value
}
_, err := v.dbmanager.TenantServiceVolumeDao().GetVolumeByServiceIDAndName(v.smr.DependServiceID, v.smr.VolumeName)
depVol, err := v.dbmanager.TenantServiceVolumeDao().GetVolumeByServiceIDAndName(v.smr.DependServiceID, v.smr.VolumeName)
if err != nil {
return fmt.Errorf("error getting TenantServiceVolume according to serviceID(%s) and volumeName(%s): %v",
v.smr.DependServiceID, v.smr.VolumeName, err)
@ -98,6 +98,6 @@ func (v *ConfigFileVolume) CreateDependVolume(define *Define) error {
cmap.Data[path.Base(v.smr.VolumePath)] = util.ParseVariable(cf.FileContent, configs)
v.as.SetConfigMap(cmap)
define.SetVolumeCMap(cmap, path.Base(v.smr.VolumePath), v.smr.VolumePath, false)
define.SetVolumeCMap(cmap, path.Base(v.smr.VolumePath), v.smr.VolumePath, false, depVol.Mode)
return nil
}

View File

@ -23,6 +23,7 @@ import (
"os"
"path"
"sort"
"strconv"
"strings"
"github.com/goodrain/rainbond/db"
@ -211,8 +212,7 @@ func (v *Define) SetVolume(VolumeType dbmodel.VolumeType, name, mountPath, hostP
}
// SetVolumeCMap sets volumes and volumeMounts. The type of volumes is configMap.
func (v *Define) SetVolumeCMap(cmap *corev1.ConfigMap, k, p string, isReadOnly bool) {
var configFileMode int32 = 0777
func (v *Define) SetVolumeCMap(cmap *corev1.ConfigMap, k, p string, isReadOnly bool, mode *int32) {
vm := corev1.VolumeMount{
MountPath: p,
Name: cmap.Name,
@ -221,6 +221,11 @@ func (v *Define) SetVolumeCMap(cmap *corev1.ConfigMap, k, p string, isReadOnly b
}
v.volumeMounts = append(v.volumeMounts, vm)
var defaultMode int32 = 0777
if mode != nil {
// convert int to octal
octal, _ := strconv.ParseInt(strconv.Itoa(int(*mode)), 8, 64)
defaultMode = int32(octal)
}
vo := corev1.Volume{
Name: cmap.Name,
VolumeSource: corev1.VolumeSource{
@ -233,7 +238,7 @@ func (v *Define) SetVolumeCMap(cmap *corev1.ConfigMap, k, p string, isReadOnly b
{
Key: k,
Path: path.Base(p), // subpath
Mode: &configFileMode,
Mode: &defaultMode,
},
},
},

View File

@ -204,11 +204,10 @@ func (a *AppRuntimeSyncClient) AddThirdPartyEndpoint(req *model.Endpoint) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
_, _ = a.AppRuntimeSyncClient.AddThirdPartyEndpoint(ctx, &pb.AddThirdPartyEndpointsReq{
Uuid: req.UUID,
Sid: req.ServiceID,
Ip: req.IP,
Port: int32(req.Port),
IsOnline: *req.IsOnline,
Uuid: req.UUID,
Sid: req.ServiceID,
Ip: req.IP,
Port: int32(req.Port),
})
}
@ -217,11 +216,10 @@ func (a *AppRuntimeSyncClient) UpdThirdPartyEndpoint(req *model.Endpoint) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
_, _ = a.AppRuntimeSyncClient.UpdThirdPartyEndpoint(ctx, &pb.UpdThirdPartyEndpointsReq{
Uuid: req.UUID,
Sid: req.ServiceID,
Ip: req.IP,
Port: int32(req.Port),
IsOnline: *req.IsOnline,
Uuid: req.UUID,
Sid: req.ServiceID,
Ip: req.IP,
Port: int32(req.Port),
})
}

View File

@ -24,7 +24,6 @@ import (
"os"
"time"
"github.com/eapache/channels"
"github.com/goodrain/rainbond/cmd/worker/option"
"github.com/goodrain/rainbond/mq/api/grpc/pb"
"github.com/goodrain/rainbond/mq/client"
@ -59,11 +58,10 @@ type TaskManager struct {
func NewTaskManager(cfg option.Config,
store store.Storer,
controllermanager *controller.Manager,
garbageCollector *gc.GarbageCollector,
startCh *channels.RingChannel) *TaskManager {
garbageCollector *gc.GarbageCollector) *TaskManager {
ctx, cancel := context.WithCancel(context.Background())
handleManager := handle.NewManager(ctx, cfg, store, controllermanager, garbageCollector, startCh)
handleManager := handle.NewManager(ctx, cfg, store, controllermanager, garbageCollector)
healthStatus["status"] = "health"
healthStatus["info"] = "worker service health"
return &TaskManager{

View File

@ -25,11 +25,6 @@ import (
"strings"
"time"
"github.com/eapache/channels"
"github.com/sirupsen/logrus"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/goodrain/rainbond/cmd/worker/option"
"github.com/goodrain/rainbond/db"
dbmodel "github.com/goodrain/rainbond/db/model"
@ -41,6 +36,9 @@ import (
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/goodrain/rainbond/worker/discover/model"
"github.com/goodrain/rainbond/worker/gc"
"github.com/sirupsen/logrus"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//Manager manager
@ -51,8 +49,6 @@ type Manager struct {
dbmanager db.Manager
controllerManager *controller.Manager
garbageCollector *gc.GarbageCollector
startCh *channels.RingChannel
}
//NewManager now handle
@ -60,8 +56,7 @@ func NewManager(ctx context.Context,
config option.Config,
store store.Storer,
controllerManager *controller.Manager,
garbageCollector *gc.GarbageCollector,
startCh *channels.RingChannel) *Manager {
garbageCollector *gc.GarbageCollector) *Manager {
return &Manager{
ctx: ctx,
@ -70,7 +65,6 @@ func NewManager(ctx context.Context,
store: store,
controllerManager: controllerManager,
garbageCollector: garbageCollector,
startCh: startCh,
}
}
@ -426,32 +420,6 @@ func (m *Manager) applyRuleExec(task *model.Task) error {
return fmt.Errorf("component apply rule controller failure:%s", err.Error())
}
if svc.Kind == dbmodel.ServiceKindThirdParty.String() && strings.HasPrefix(body.Action, "port") {
if oldAppService == nil {
m.store.RegistAppService(newAppService)
}
if err = m.store.InitOneThirdPartService(svc); err != nil {
logrus.Errorf("application apply service resource failure: %s", err.Error())
return fmt.Errorf("application apply service resource failure: %s", err.Error())
}
if body.Action == "port-open" {
m.startCh.In() <- &v1.Event{
Type: v1.StartEvent,
Sid: body.ServiceID,
Port: body.Port,
IsInner: body.IsInner,
}
}
if body.Action == "port-close" {
if !db.GetManager().TenantServicesPortDao().HasOpenPort(body.ServiceID) {
m.startCh.In() <- &v1.Event{
Type: v1.StopEvent,
Sid: body.ServiceID,
}
}
}
}
return nil
}

View File

@ -24,7 +24,11 @@ import (
"time"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
rainbondlistersv1alpha1 "github.com/goodrain/rainbond/pkg/generated/listers/rainbond/v1alpha1"
validation "github.com/goodrain/rainbond/util/endpoint"
dis "github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/discover"
"github.com/oam-dev/kubevela/pkg/utils/apply"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
@ -34,8 +38,11 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
runtimecache "sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@ -44,6 +51,7 @@ import (
const reconcileTimeOut = 60 * time.Second
// Reconciler -
type Reconciler struct {
Client client.Client
restConfig *rest.Config
@ -52,6 +60,11 @@ type Reconciler struct {
applyer apply.Applicator
discoverPool *DiscoverPool
discoverNum prometheus.Gauge
informer runtimecache.Informer
lister rainbondlistersv1alpha1.ThirdComponentLister
recorder record.EventRecorder
}
// Reconcile is the main logic of appDeployment controller
@ -81,8 +94,9 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
r.discoverPool.RemoveDiscover(component)
return ctrl.Result{}, nil
}
logrus.Debugf("start to reconcile component %s/%s", component.Namespace, component.Name)
discover, err := NewDiscover(component, r.restConfig)
discover, err := dis.NewDiscover(component, r.restConfig, r.lister)
if err != nil {
component.Status.Phase = v1alpha1.ComponentFailed
component.Status.Reason = err.Error()
@ -95,6 +109,8 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
r.updateStatus(ctx, component)
return ctrl.Result{}, nil
}
r.discoverPool.AddDiscover(discover)
endpoints, err := discover.DiscoverOne(ctx)
if err != nil {
component.Status.Phase = v1alpha1.ComponentFailed
@ -102,7 +118,6 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
r.updateStatus(ctx, component)
return ctrl.Result{}, nil
}
r.discoverPool.AddDiscover(discover)
if len(endpoints) == 0 {
component.Status.Phase = v1alpha1.ComponentPending
@ -114,15 +129,10 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
// create endpoints for service
if len(component.Spec.Ports) > 0 && len(component.Status.Endpoints) > 0 {
var services corev1.ServiceList
selector, err := labels.Parse(labels.FormatLabels(map[string]string{
selector, _ := labels.Parse(labels.FormatLabels(map[string]string{
"service_id": component.Labels["service_id"],
}))
if err != nil {
logrus.Errorf("create selector failure %s", err.Error())
return ctrl.Result{}, err
}
err = r.Client.List(ctx, &services, &client.ListOptions{LabelSelector: selector})
if err != nil {
if err = r.Client.List(ctx, &services, &client.ListOptions{LabelSelector: selector}); err != nil {
return commonResult, nil
}
log.Infof("list component service success, size:%d", len(services.Items))
@ -130,6 +140,7 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
log.Warning("component service is empty")
return commonResult, nil
}
// init component port
var portMap = make(map[int][]*v1alpha1.ThirdComponentEndpointStatus)
for _, end := range component.Status.Endpoints {
@ -139,33 +150,34 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
}
portMap[port] = append(portMap[end.Address.GetPort()], end)
}
// create endpoint for component service
for _, service := range services.Items {
for _, port := range service.Spec.Ports {
// if component port not exist in endpoint port list, ignore it.
if sourceEndpoint, ok := portMap[int(port.Port)]; ok {
endpoint := createEndpoint(component, &service, sourceEndpoint, int(port.Port))
if len(component.Spec.Ports) == 1 && len(component.Spec.EndpointSource.StaticEndpoints) != 0 {
svc := services.Items[0]
ep := createEndpointsOnlyOnePort(component, svc, component.Status.Endpoints)
if ep != nil {
controllerutil.SetControllerReference(component, ep, r.Scheme)
r.applyEndpointService(ctx, log, &svc, ep)
}
} else {
for _, service := range services.Items {
service := service
for _, port := range service.Spec.Ports {
// if component port not exist in endpoint port list, ignore it.
sourceEndpoint, ok := portMap[int(port.Port)]
if !ok {
continue
}
endpoint := createEndpoint(component, &service, sourceEndpoint)
controllerutil.SetControllerReference(component, &endpoint, r.Scheme)
var old corev1.Endpoints
var apply = true
if err := r.Client.Get(ctx, types.NamespacedName{Namespace: endpoint.Namespace, Name: endpoint.Name}, &old); err == nil {
// no change not apply
if reflect.DeepEqual(old.Subsets, endpoint.Subsets) {
apply = false
}
}
if apply {
if err := r.applyer.Apply(ctx, &endpoint); err != nil {
log.Errorf("apply endpoint for service %s failure %s", service.Name, err.Error())
}
log.Infof("apply endpoint for service %s success", service.Name)
}
r.applyEndpointService(ctx, log, &service, &endpoint)
}
}
}
}
component.Status.Endpoints = endpoints
component.Status.Phase = v1alpha1.ComponentRunning
component.Status.Reason = ""
if err := r.updateStatus(ctx, component); err != nil {
log.Errorf("update status failure %s", err.Error())
return commonResult, nil
@ -173,13 +185,104 @@ func (r *Reconciler) Reconcile(req ctrl.Request) (res reconcile.Result, retErr e
return reconcile.Result{}, nil
}
func createEndpoint(component *v1alpha1.ThirdComponent, service *corev1.Service, sourceEndpoint []*v1alpha1.ThirdComponentEndpointStatus, port int) corev1.Endpoints {
func (r *Reconciler) applyEndpointService(ctx context.Context, log *logrus.Entry, svc *corev1.Service, ep *corev1.Endpoints) {
var old corev1.Endpoints
if err := r.Client.Get(ctx, types.NamespacedName{Namespace: ep.Namespace, Name: ep.Name}, &old); err == nil {
// no change not apply
if reflect.DeepEqual(old.Subsets, ep.Subsets) &&
reflect.DeepEqual(old.Annotations, ep.Annotations) {
return
}
}
if err := r.applyer.Apply(ctx, ep); err != nil {
log.Errorf("apply endpoint for service %s failure %s", svc.Name, err.Error())
}
svc.Annotations = ep.Annotations
if err := r.applyer.Apply(ctx, svc); err != nil {
log.Errorf("apply service(%s) for updating annotation: %v", svc.Name, err)
}
log.Infof("apply endpoint for service %s success", svc.Name)
}
func createEndpointsOnlyOnePort(thirdComponent *v1alpha1.ThirdComponent, service corev1.Service, sourceEndpoints []*v1alpha1.ThirdComponentEndpointStatus) *corev1.Endpoints {
if len(thirdComponent.Spec.EndpointSource.StaticEndpoints) == 0 {
// support static endpoints only for now
return nil
}
if len(thirdComponent.Spec.Ports) != 1 {
return nil
}
logrus.Debugf("create endpoints with one port")
sourceEndpointPE := make(map[int][]*v1alpha1.ThirdComponentEndpointStatus)
for _, ep := range sourceEndpoints {
eps := sourceEndpointPE[ep.Address.GetPort()]
sourceEndpointPE[ep.Address.GetPort()] = append(eps, ep)
}
endpoints := &corev1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: service.Name,
Namespace: service.Namespace,
Labels: service.Labels,
},
}
servicePort := service.Spec.Ports[0]
var subsets []corev1.EndpointSubset
var domain string
for port, eps := range sourceEndpointPE {
subset := corev1.EndpointSubset{
Ports: []corev1.EndpointPort{
{
Name: servicePort.Name,
Port: int32(port),
Protocol: servicePort.Protocol,
AppProtocol: servicePort.AppProtocol,
},
},
}
for _, ep := range eps {
if validation.IsDomainNotIP(string(ep.Address)) {
domain = string(ep.Address)
}
address := corev1.EndpointAddress{
IP: ep.Address.GetIP(),
}
if ep.Status == v1alpha1.EndpointReady {
subset.Addresses = append(subset.Addresses, address)
} else {
subset.NotReadyAddresses = append(subset.NotReadyAddresses, address)
}
}
subsets = append(subsets, subset)
}
endpoints.Subsets = subsets
if domain != "" {
endpoints.Annotations = map[string]string{
"domain": domain,
}
}
return endpoints
}
func createEndpoint(component *v1alpha1.ThirdComponent, service *corev1.Service, sourceEndpoint []*v1alpha1.ThirdComponentEndpointStatus) corev1.Endpoints {
spep := make(map[int]int, len(sourceEndpoint))
for _, endpoint := range sourceEndpoint {
if endpoint.ServicePort != 0 {
spep[endpoint.ServicePort] = endpoint.Address.GetPort()
}
}
var domain string
endpoints := corev1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
@ -211,6 +314,9 @@ func createEndpoint(component *v1alpha1.ThirdComponent, service *corev1.Service,
}(),
Addresses: func() (re []corev1.EndpointAddress) {
for _, se := range sourceEndpoint {
if validation.IsDomainNotIP(string(se.Address)) {
domain = string(se.Address)
}
if se.Status == v1alpha1.EndpointReady {
re = append(re, corev1.EndpointAddress{
IP: se.Address.GetIP(),
@ -229,7 +335,7 @@ func createEndpoint(component *v1alpha1.ThirdComponent, service *corev1.Service,
}(),
NotReadyAddresses: func() (re []corev1.EndpointAddress) {
for _, se := range sourceEndpoint {
if se.Status == v1alpha1.EndpointNotReady {
if se.Status == v1alpha1.EndpointNotReady || se.Status == v1alpha1.EndpointUnhealthy {
re = append(re, corev1.EndpointAddress{
IP: se.Address.GetIP(),
TargetRef: &corev1.ObjectReference{
@ -249,6 +355,13 @@ func createEndpoint(component *v1alpha1.ThirdComponent, service *corev1.Service,
}
}(),
}
if domain != "" {
endpoints.Annotations = map[string]string{
"domain": domain,
}
}
return endpoints
}
@ -277,25 +390,36 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
Complete(r)
}
// Collect -
func (r *Reconciler) Collect(ch chan<- prometheus.Metric) {
ch <- prometheus.MustNewConstMetric(r.discoverNum.Desc(), prometheus.GaugeValue, r.discoverPool.GetSize())
}
// Setup adds a controller that reconciles AppDeployment.
func Setup(ctx context.Context, mgr ctrl.Manager) (*Reconciler, error) {
applyer := apply.NewAPIApplicator(mgr.GetClient())
informer, err := mgr.GetCache().GetInformerForKind(ctx, v1alpha1.SchemeGroupVersion.WithKind("ThirdComponent"))
if err != nil {
return nil, errors.WithMessage(err, "get informer for thirdcomponent")
}
lister := rainbondlistersv1alpha1.NewThirdComponentLister(informer.(cache.SharedIndexInformer).GetIndexer())
recorder := mgr.GetEventRecorderFor("thirdcomponent-controller")
r := &Reconciler{
Client: mgr.GetClient(),
restConfig: mgr.GetConfig(),
Scheme: mgr.GetScheme(),
applyer: applyer,
applyer: apply.NewAPIApplicator(mgr.GetClient()),
discoverNum: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "controller",
Name: "third_component_discover_number",
Help: "Number of running endpoint discover worker of third component.",
}),
informer: informer,
lister: lister,
recorder: recorder,
}
dp := NewDiscoverPool(ctx, r)
dp := NewDiscoverPool(ctx, r, recorder)
r.discoverPool = dp
return r, r.SetupWithManager(mgr)
}

View File

@ -16,7 +16,7 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package thirdcomponent
package discover
import (
"context"
@ -24,6 +24,8 @@ import (
"time"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
rainbondlistersv1alpha1 "github.com/goodrain/rainbond/pkg/generated/listers/rainbond/v1alpha1"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -32,13 +34,18 @@ import (
"k8s.io/client-go/rest"
)
// Discover -
type Discover interface {
GetComponent() *v1alpha1.ThirdComponent
DiscoverOne(ctx context.Context) ([]*v1alpha1.ThirdComponentEndpointStatus, error)
Discover(ctx context.Context, update chan *v1alpha1.ThirdComponent) ([]*v1alpha1.ThirdComponentEndpointStatus, error)
SetProberManager(proberManager prober.Manager)
}
func NewDiscover(component *v1alpha1.ThirdComponent, restConfig *rest.Config) (Discover, error) {
// NewDiscover -
func NewDiscover(component *v1alpha1.ThirdComponent,
restConfig *rest.Config,
lister rainbondlistersv1alpha1.ThirdComponentLister) (Discover, error) {
if component.Spec.EndpointSource.KubernetesService != nil {
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
@ -50,6 +57,12 @@ func NewDiscover(component *v1alpha1.ThirdComponent, restConfig *rest.Config) (D
client: clientset,
}, nil
}
if len(component.Spec.EndpointSource.StaticEndpoints) > 0 {
return &staticEndpoint{
component: component,
lister: lister,
}, nil
}
return nil, fmt.Errorf("not support source type")
}
@ -154,3 +167,7 @@ func (k *kubernetesDiscover) DiscoverOne(ctx context.Context) ([]*v1alpha1.Third
}
return es, nil
}
func (k *kubernetesDiscover) SetProberManager(proberManager prober.Manager) {
}

View File

@ -0,0 +1,102 @@
package discover
import (
"context"
"reflect"
"sync"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
rainbondlistersv1alpha1 "github.com/goodrain/rainbond/pkg/generated/listers/rainbond/v1alpha1"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober/results"
)
type staticEndpoint struct {
lister rainbondlistersv1alpha1.ThirdComponentLister
component *v1alpha1.ThirdComponent
pmlock sync.Mutex
proberManager prober.Manager
}
func (s *staticEndpoint) GetComponent() *v1alpha1.ThirdComponent {
return s.component
}
func (s *staticEndpoint) Discover(ctx context.Context, update chan *v1alpha1.ThirdComponent) ([]*v1alpha1.ThirdComponentEndpointStatus, error) {
for {
select {
case <-ctx.Done():
return nil, nil
case <-s.proberManager.Updates():
s.discoverOne(update)
}
}
}
func (s *staticEndpoint) DiscoverOne(ctx context.Context) ([]*v1alpha1.ThirdComponentEndpointStatus, error) {
component := s.component
var endpoints []*v1alpha1.ThirdComponentEndpointStatus
for _, ep := range component.Spec.EndpointSource.StaticEndpoints {
if ep.GetPort() != 0 {
address := v1alpha1.NewEndpointAddress(ep.GetIP(), ep.GetPort())
if address != nil {
endpoints = append(endpoints, &v1alpha1.ThirdComponentEndpointStatus{
Address: *address,
Name: ep.Name,
})
}
} else {
for _, port := range component.Spec.Ports {
address := v1alpha1.NewEndpointAddress(ep.Address, port.Port)
if address != nil {
endpoints = append(endpoints, &v1alpha1.ThirdComponentEndpointStatus{
Address: *address,
Name: ep.Name,
})
}
}
}
for _, ep := range endpoints {
// Make ready as the default status
ep.Status = v1alpha1.EndpointReady
}
}
// Update status with probe result
if s.proberManager != nil {
var newEndpoints []*v1alpha1.ThirdComponentEndpointStatus
for _, ep := range endpoints {
result, found := s.proberManager.GetResult(s.component.GetEndpointID(ep))
if !found {
// NotReady means the endpoint should not be online.
ep.Status = v1alpha1.EndpointNotReady
}
if result != results.Success {
ep.Status = v1alpha1.EndpointUnhealthy
}
newEndpoints = append(newEndpoints, ep)
}
return newEndpoints, nil
}
return endpoints, nil
}
func (s *staticEndpoint) SetProberManager(proberManager prober.Manager) {
s.pmlock.Lock()
defer s.pmlock.Unlock()
s.proberManager = proberManager
}
// discoverOne recomputes the endpoint list and, when it differs from the
// component's current status, emits a deep-copied component on update.
func (s *staticEndpoint) discoverOne(update chan *v1alpha1.ThirdComponent) {
	// DiscoverOne ignores its context, so context.TODO() is sufficient here.
	endpoints, _ := s.DiscoverOne(context.TODO())
	if reflect.DeepEqual(endpoints, s.component.Status.Endpoints) {
		// Nothing changed; avoid a spurious status update.
		return
	}
	updated := s.component.DeepCopy()
	updated.Status.Endpoints = endpoints
	update <- updated
}

View File

@ -25,35 +25,49 @@ import (
"time"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
dis "github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/discover"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// DiscoverPool -
type DiscoverPool struct {
ctx context.Context
lock sync.Mutex
discoverWorker map[string]*Worker
updateChan chan *v1alpha1.ThirdComponent
reconciler *Reconciler
recorder record.EventRecorder
}
func NewDiscoverPool(ctx context.Context, reconciler *Reconciler) *DiscoverPool {
// NewDiscoverPool -
func NewDiscoverPool(ctx context.Context,
reconciler *Reconciler,
recorder record.EventRecorder) *DiscoverPool {
dp := &DiscoverPool{
ctx: ctx,
discoverWorker: make(map[string]*Worker),
updateChan: make(chan *v1alpha1.ThirdComponent, 1024),
reconciler: reconciler,
recorder: recorder,
}
go dp.Start()
return dp
}
// GetSize -
func (d *DiscoverPool) GetSize() float64 {
d.lock.Lock()
defer d.lock.Unlock()
return float64(len(d.discoverWorker))
}
// Start -
func (d *DiscoverPool) Start() {
logrus.Infof("third component discover pool started")
for {
@ -74,65 +88,37 @@ func (d *DiscoverPool) Start() {
d.RemoveDiscover(component)
return
}
logrus.Errorf("update component status failure", err.Error())
logrus.Errorf("update component status failure: %s", err.Error())
}
logrus.Infof("update component %s status success by discover pool", name)
} else {
logrus.Debugf("component %s status endpoints not change", name)
}
}()
}
}
}
type Worker struct {
discover Discover
cancel context.CancelFunc
ctx context.Context
updateChan chan *v1alpha1.ThirdComponent
stoped bool
}
func (w *Worker) Start() {
defer func() {
logrus.Infof("discover endpoint list worker %s/%s stoed", w.discover.GetComponent().Namespace, w.discover.GetComponent().Name)
w.stoped = true
}()
w.stoped = false
logrus.Infof("discover endpoint list worker %s/%s started", w.discover.GetComponent().Namespace, w.discover.GetComponent().Name)
for {
w.discover.Discover(w.ctx, w.updateChan)
select {
case <-w.ctx.Done():
return
default:
}
}
}
func (w *Worker) UpdateDiscover(discover Discover) {
w.discover = discover
}
func (w *Worker) Stop() {
w.cancel()
}
func (w *Worker) IsStop() bool {
return w.stoped
}
func (d *DiscoverPool) newWorker(dis Discover) *Worker {
func (d *DiscoverPool) newWorker(dis dis.Discover) *Worker {
ctx, cancel := context.WithCancel(d.ctx)
return &Worker{
worker := &Worker{
ctx: ctx,
discover: dis,
cancel: cancel,
updateChan: d.updateChan,
}
component := dis.GetComponent()
if component.Spec.IsStaticEndpoints() {
proberManager := prober.NewManager(d.recorder)
dis.SetProberManager(proberManager)
worker.proberManager = proberManager
}
return worker
}
func (d *DiscoverPool) AddDiscover(dis Discover) {
// AddDiscover -
func (d *DiscoverPool) AddDiscover(dis dis.Discover) {
d.lock.Lock()
defer d.lock.Unlock()
component := dis.GetComponent()
@ -149,10 +135,14 @@ func (d *DiscoverPool) AddDiscover(dis Discover) {
return
}
worker := d.newWorker(dis)
if component.Spec.IsStaticEndpoints() {
worker.proberManager.AddThirdComponent(dis.GetComponent())
}
go worker.Start()
d.discoverWorker[key] = worker
}
// RemoveDiscover -
func (d *DiscoverPool) RemoveDiscover(component *v1alpha1.ThirdComponent) {
d.lock.Lock()
defer d.lock.Unlock()
@ -164,6 +154,7 @@ func (d *DiscoverPool) RemoveDiscover(component *v1alpha1.ThirdComponent) {
}
}
// RemoveDiscoverByName -
func (d *DiscoverPool) RemoveDiscoverByName(req types.NamespacedName) {
d.lock.Lock()
defer d.lock.Unlock()

View File

@ -0,0 +1,113 @@
package prober
import (
"fmt"
"net/http"
"net/url"
"time"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober/results"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/probe"
httpprobe "k8s.io/kubernetes/pkg/probe/http"
tcpprobe "k8s.io/kubernetes/pkg/probe/tcp"
)
// maxProbeRetries bounds how many consecutive attempts a single probe
// invocation makes before reporting the last error.
const maxProbeRetries = 3
// prober helps to check the readiness of an endpoint over HTTP or TCP.
type prober struct {
	// http performs HTTP GET probes.
	http httpprobe.Prober
	// tcp performs TCP connect probes.
	tcp tcpprobe.Prober
	logger *logrus.Entry
	// recorder emits Kubernetes events when probes fail.
	recorder record.EventRecorder
}
// newProber builds a prober with HTTP and TCP backends, a tagged logger,
// and the given event recorder.
func newProber(
	recorder record.EventRecorder) *prober {
	pb := &prober{
		recorder: recorder,
	}
	pb.logger = logrus.WithField("WHO", "Thirdcomponent Prober")
	pb.http = httpprobe.New()
	pb.tcp = tcpprobe.New()
	return pb
}
// probe runs the configured probe against the endpoint address and maps
// the outcome onto a results.Result. A component without a probe spec is
// treated as always ready.
func (pb *prober) probe(thirdComponent *v1alpha1.ThirdComponent, endpointStatus *v1alpha1.ThirdComponentEndpointStatus, endpointID string) (results.Result, error) {
	probeSpec := thirdComponent.Spec.Probe
	if probeSpec == nil {
		// No probe configured: default to success.
		pb.logger.Warningf("probe for %s is nil", endpointID)
		return results.Success, nil
	}
	result, output, err := pb.runProbeWithRetries(probeSpec, thirdComponent, endpointStatus, endpointID, maxProbeRetries)
	if err == nil && result == probe.Success {
		return results.Success, nil
	}
	// Probe failed in one way or another: log it and record an event.
	if err != nil {
		pb.logger.Infof("probe for %q errored: %v", endpointID, err)
		pb.recordContainerEvent(thirdComponent, v1.EventTypeWarning, "EndpointUnhealthy", "probe errored: %v", err)
	} else {
		pb.logger.Debugf("probe for %q failed (%v): %s", endpointID, result, output)
		pb.recordContainerEvent(thirdComponent, v1.EventTypeWarning, "EndpointUnhealthy", "probe failed: %s", output)
	}
	return results.Failure, err
}
// runProbeWithRetries invokes runProbe up to retries times and returns the
// first error-free attempt; if every attempt errors, it returns the last
// result, output, and error.
func (pb *prober) runProbeWithRetries(p *v1alpha1.Probe, thirdComponent *v1alpha1.ThirdComponent, endpointStatus *v1alpha1.ThirdComponentEndpointStatus, endpointID string, retries int) (probe.Result, string, error) {
	var (
		result probe.Result
		output string
		err    error
	)
	for attempt := 0; attempt < retries; attempt++ {
		if result, output, err = pb.runProbe(p, thirdComponent, endpointStatus, endpointID); err == nil {
			return result, output, nil
		}
	}
	return result, output, err
}
// runProbe dispatches a single probe attempt to the HTTP or TCP backend,
// depending on which handler the probe spec declares. A spec with neither
// handler yields an error.
func (pb *prober) runProbe(p *v1alpha1.Probe, thirdComponent *v1alpha1.ThirdComponent, endpointStatus *v1alpha1.ThirdComponentEndpointStatus, endpointID string) (probe.Result, string, error) {
	timeout := time.Duration(p.TimeoutSeconds) * time.Second
	switch {
	case p.HTTPGet != nil:
		target, err := url.Parse(endpointStatus.Address.EnsureScheme())
		if err != nil {
			return probe.Unknown, "", err
		}
		return pb.http.Probe(target, buildHeader(p.HTTPGet.HTTPHeaders), timeout)
	case p.TCPSocket != nil:
		return pb.tcp.Probe(endpointStatus.Address.GetIP(), endpointStatus.Address.GetPort(), timeout)
	default:
		pb.logger.Warningf("Failed to find probe builder for endpoint address: %v", endpointID)
		return probe.Unknown, "", fmt.Errorf("missing probe handler for %s/%s", thirdComponent.Namespace, thirdComponent.Name)
	}
}
// recordContainerEvent should be used by the prober for all endpoints related events.
// It forwards to the Kubernetes event recorder with the third component as
// the involved object.
func (pb *prober) recordContainerEvent(thirdComponent *v1alpha1.ThirdComponent, eventType, reason, message string, args ...interface{}) {
	pb.recorder.Eventf(thirdComponent, eventType, reason, message, args...)
}
// buildHeader takes a list of HTTPHeader <name, value> pairs and returns a
// populated string->[]string http.Header map. Names are written verbatim
// (no MIME canonicalization), and repeated names accumulate values.
func buildHeader(headerList []v1alpha1.HTTPHeader) http.Header {
	out := make(http.Header, len(headerList))
	for _, h := range headerList {
		out[h.Name] = append(out[h.Name], h.Value)
	}
	return out
}

View File

@ -0,0 +1,127 @@
package prober
import (
"sync"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober/results"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/metrics"
)
// ProberResults stores the cumulative number of a probe by result as prometheus metrics.
// The label names declared here define the exact label set that every
// observation (worker metric labels) must supply.
var ProberResults = metrics.NewCounterVec(
	&metrics.CounterOpts{
		Subsystem: "prober",
		Name: "probe_total",
		Help: "Cumulative number of a readiness probe for a thirdcomponent endpoint by result.",
		StabilityLevel: metrics.ALPHA,
	},
	[]string{"result",
		"endpoint",
		"thirdcomponent",
		"namespace"},
)
// Manager manages thirdcomponent probing. It creates a probe "worker" for every endpoint address that specifies a
// probe (AddThirdComponent). The worker periodically probes its assigned endpoint address and caches the results. The
// manager use the cached probe results to set the appropriate Ready state in the ThirdComponentEndpointStatus when
// requested (ThirdComponentEndpointStatus). Updating probe parameters is not currently supported.
type Manager interface {
	// AddThirdComponent creates new probe workers for every endpoint address probe.
	AddThirdComponent(thirdComponent *v1alpha1.ThirdComponent)
	// GetResult returns the probe result based on the given ID.
	GetResult(endpointID string) (results.Result, bool)
	// Stop terminates every active probe worker.
	Stop()
	// Updates creates a channel that receives an Update whenever its result changes (but not
	// removed).
	// NOTE: The current implementation only supports a single updates channel.
	Updates() <-chan results.Update
}
// manager is the concrete Manager implementation.
type manager struct {
	// Map of active workers for probes, keyed by endpoint address.
	workers map[string]*worker
	// Lock for accessing & mutating workers
	workerLock sync.RWMutex
	// readinessManager manages the results of readiness probes
	readinessManager results.Manager
	// prober executes the probe actions.
	prober *prober
	// channel of updates, shared with readinessManager
	updates chan results.Update
}
// NewManager creates a Manager for thirdcomponent endpoint probing; probe
// result changes are published on the manager's Updates channel.
func NewManager(
	recorder record.EventRecorder) Manager {
	updateChan := make(chan results.Update)
	return &manager{
		prober:           newProber(recorder),
		readinessManager: results.NewManager(updateChan),
		workers:          map[string]*worker{},
		updates:          updateChan,
	}
}
// AddThirdComponent creates (or reuses) one probe worker per endpoint of
// the given component. Workers whose endpoint disappeared, or whose probe
// spec changed, are stopped. Components that don't need probing are ignored.
func (m *manager) AddThirdComponent(thirdComponent *v1alpha1.ThirdComponent) {
	if !thirdComponent.Spec.NeedProbe() {
		return
	}
	m.workerLock.Lock()
	defer m.workerLock.Unlock()
	newWorkers := make(map[string]*worker)
	for _, ep := range thirdComponent.Status.Endpoints {
		key := string(ep.Address)
		// Reuse the existing worker when the probe spec is unchanged.
		// Checking before newWorker avoids allocating a throwaway worker
		// (the original compared via worker.spec, which is exactly
		// thirdComponent.Spec.Probe).
		if oldWorker, ok := m.workers[key]; ok && thirdComponent.Spec.Probe.Equals(oldWorker.spec) {
			newWorkers[key] = oldWorker
			delete(m.workers, key)
			continue
		}
		// run new worker ("w" rather than "worker" so the type name is not shadowed)
		w := newWorker(m, thirdComponent, *ep)
		newWorkers[key] = w
		go w.run()
	}
	// stop workers left in the old map: their endpoint vanished or its spec changed
	for _, w := range m.workers {
		w.stop()
	}
	m.workers = newWorkers
}
// Stop signals every active probe worker to shut down.
func (m *manager) Stop() {
	m.workerLock.Lock()
	defer m.workerLock.Unlock()
	for _, w := range m.workers {
		w.stop()
	}
}
// GetResult returns the cached readiness result for the given endpoint ID
// and whether a result exists.
func (m *manager) GetResult(endpointID string) (results.Result, bool) {
	return m.readinessManager.Get(endpointID)
}
// removeWorker drops the bookkeeping entry for the endpoint's worker.
// Called by the worker after exiting.
func (m *manager) removeWorker(endpoint *v1alpha1.ThirdComponentEndpointStatus) {
	m.workerLock.Lock()
	defer m.workerLock.Unlock()
	delete(m.workers, string(endpoint.Address))
}
// Updates returns the channel on which probe result changes are published
// (see the Manager interface note: a single shared channel).
func (m *manager) Updates() <-chan results.Update {
	return m.updates
}

View File

@ -0,0 +1,139 @@
package prober
import (
"net/http"
"net/url"
"reflect"
"testing"
"time"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober/results"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/probe"
)
// TestHTTPHeaders verifies that buildHeader turns HTTPHeader pairs into an
// http.Header, preserving duplicate names as multi-values.
func TestHTTPHeaders(t *testing.T) {
	cases := []struct {
		input  []v1alpha1.HTTPHeader
		output http.Header
	}{
		{[]v1alpha1.HTTPHeader{}, http.Header{}},
		{[]v1alpha1.HTTPHeader{
			{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
		}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"}}},
		{[]v1alpha1.HTTPHeader{
			{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
			{Name: "X-Muffins-Or-Plumcakes", Value: "Muffins!"},
		}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins"},
			"X-Muffins-Or-Plumcakes": {"Muffins!"}}},
		{[]v1alpha1.HTTPHeader{
			{Name: "X-Muffins-Or-Cupcakes", Value: "Muffins"},
			{Name: "X-Muffins-Or-Cupcakes", Value: "Cupcakes, too"},
		}, http.Header{"X-Muffins-Or-Cupcakes": {"Muffins", "Cupcakes, too"}}},
	}
	for _, tc := range cases {
		got := buildHeader(tc.input)
		if !reflect.DeepEqual(tc.output, got) {
			t.Errorf("Expected %#v, got %#v", tc.output, got)
		}
	}
}
// TestProbe verifies that prober.probe maps handler results and errors
// onto results.Result values.
func TestProbe(t *testing.T) {
	httpProbe := &v1alpha1.Probe{
		Handler: v1alpha1.Handler{
			HTTPGet: &v1alpha1.HTTPGetAction{},
		},
	}
	tests := []struct {
		name string
		probe *v1alpha1.Probe
		env []v1.EnvVar
		execError bool
		expectError bool
		execResult probe.Result
		expectedResult results.Result
		expectCommand []string
	}{
		{
			name: "No probe",
			probe: nil,
			expectedResult: results.Success,
		},
		{
			name: "No handler",
			probe: &v1alpha1.Probe{},
			expectError: true,
			expectedResult: results.Failure,
		},
		{
			name: "Probe fails",
			probe: httpProbe,
			execResult: probe.Failure,
			expectedResult: results.Failure,
		},
		{
			name: "Probe succeeds",
			probe: httpProbe,
			execResult: probe.Success,
			expectedResult: results.Success,
		},
		{
			name: "Probe result is unknown",
			probe: httpProbe,
			execResult: probe.Unknown,
			expectedResult: results.Failure,
		},
		{
			name: "Probe has an error",
			probe: httpProbe,
			execError: true,
			expectError: true,
			execResult: probe.Unknown,
			expectedResult: results.Failure,
		},
	}
	for i := range tests {
		test := tests[i]
		// Build the prober through newProber so its logger is initialized;
		// a zero-value prober has a nil *logrus.Entry and the first log
		// call (e.g. the nil-probe warning) would panic.
		pb := newProber(&record.FakeRecorder{})
		thirdComponent := &v1alpha1.ThirdComponent{
			Spec: v1alpha1.ThirdComponentSpec{
				Probe: test.probe,
			},
		}
		if test.execError {
			pb.http = fakeHTTPProber{test.execResult, errors.New("exec error")}
		} else {
			pb.http = fakeHTTPProber{test.execResult, nil}
		}
		result, err := pb.probe(thirdComponent, &v1alpha1.ThirdComponentEndpointStatus{}, "foobar")
		if test.expectError && err == nil {
			t.Errorf("[%s] Expected probe error but no error was returned.", test.name)
		}
		if !test.expectError && err != nil {
			t.Errorf("[%s] Didn't expect probe error but got: %v", test.name, err)
		}
		if test.expectedResult != result {
			t.Errorf("[%s] Expected result to be %v but was %v", test.name, test.expectedResult, result)
		}
	}
}
// fakeHTTPProber is a stub HTTP prober that returns a canned result/error;
// it is assigned to the prober's http field in tests.
type fakeHTTPProber struct {
	result probe.Result
	err error
}

// Probe returns the canned result and error, with empty output.
func (p fakeHTTPProber) Probe(url *url.URL, headers http.Header, timeout time.Duration) (probe.Result, string, error) {
	return p.result, "", p.err
}

View File

@ -0,0 +1,108 @@
package results
import (
"sync"
)
// Manager provides a probe results cache and channel of updates.
type Manager interface {
	// Get returns the cached result for the endpoint.
	Get(endpointID string) (Result, bool)
	// Set sets the cached result for the endpoint. When the value changes,
	// an Update is published on the channel given to NewManager.
	Set(endpointID string, result Result)
	// Remove clears the cached result for the endpoint.
	Remove(endpointID string)
}
// Result is the type for probe results.
type Result int

const (
	// Unknown is encoded as -1 (type Result)
	Unknown Result = iota - 1
	// Success is encoded as 0 (type Result)
	Success
	// Failure is encoded as 1 (type Result)
	Failure
)

// String renders the result as a human-readable name; any value other
// than Success or Failure is reported as "UNKNOWN".
func (r Result) String() string {
	if r == Success {
		return "Success"
	}
	if r == Failure {
		return "Failure"
	}
	return "UNKNOWN"
}

// ToPrometheusType translates a Result to a form which is better understood by prometheus.
func (r Result) ToPrometheusType() float64 {
	if r == Success {
		return 0
	}
	if r == Failure {
		return 1
	}
	return -1
}

// Update is published over the Updates channel whenever an endpoint's
// cached probe result changes.
type Update struct {
	EndpointID string
	Result     Result
}
// Manager implementation.
type manager struct {
	// guards the cache
	sync.RWMutex
	// map of endpoint ID -> probe Result
	cache map[string]Result
	// channel of updates
	updates chan Update
}

var _ Manager = &manager{}

// NewManager creates and returns an empty results manager that publishes
// changes on the supplied updates channel.
func NewManager(updates chan Update) Manager {
	return &manager{cache: map[string]Result{}, updates: updates}
}

// Get returns the cached result for the endpoint, if any.
func (m *manager) Get(endpointID string) (Result, bool) {
	m.RLock()
	defer m.RUnlock()
	r, ok := m.cache[endpointID]
	return r, ok
}

// Set stores the result and, if it changed, publishes an Update. The send
// happens outside the lock so a slow receiver cannot block cache access.
func (m *manager) Set(endpointID string, result Result) {
	if m.store(endpointID, result) {
		m.updates <- Update{EndpointID: endpointID, Result: result}
	}
}

// store records the result under the lock and reports whether the cached
// value actually changed (including the first write for an ID).
func (m *manager) store(endpointID string, result Result) bool {
	m.Lock()
	defer m.Unlock()
	if prev, ok := m.cache[endpointID]; ok && prev == result {
		return false
	}
	m.cache[endpointID] = result
	return true
}

// Remove clears the cached result for the endpoint.
func (m *manager) Remove(endpointID string) {
	m.Lock()
	defer m.Unlock()
	delete(m.cache, endpointID)
}

View File

@ -0,0 +1,26 @@
package results
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCacheOperations exercises Get/Set/Remove on the results manager.
func TestCacheOperations(t *testing.T) {
	// Set publishes on the updates channel when a value changes; give the
	// channel a buffer so the send does not block this receiver-less test
	// (an unbuffered channel here deadlocks on the first Set).
	m := NewManager(make(chan Update, 1))
	unsetID := "unset"
	setID := "set"
	_, found := m.Get(unsetID)
	assert.False(t, found, "unset result found")
	m.Set(setID, Success)
	result, found := m.Get(setID)
	assert.True(t, result == Success, "set result")
	assert.True(t, found, "set result found")
	m.Remove(setID)
	_, found = m.Get(setID)
	assert.False(t, found, "removed result found")
}

View File

@ -0,0 +1,180 @@
package prober
import (
"math/rand"
"time"
"github.com/docker/go-metrics"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober/results"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/runtime"
)
// Values reported in the "result" label of the ProberResults metric.
const (
	probeResultSuccessful string = "successful"
	probeResultFailed string = "failed"
	probeResultUnknown string = "unknown"
)
// worker handles the periodic probing of its assigned endpoint. Each worker
// has a go-routine associated with it which runs the probe loop until the
// stop channel is closed, caching results in the manager's readiness
// results manager.
type worker struct {
	// Channel for stopping the probe.
	stopCh chan struct{}
	// The thirdcomponent owning this probe (read-only)
	thirdComponent *v1alpha1.ThirdComponent
	// The endpoint to probe (read-only)
	endpoint v1alpha1.ThirdComponentEndpointStatus
	// Describes the probe configuration (read-only)
	spec *v1alpha1.Probe
	// The probe value during the initial delay.
	initialValue results.Result
	// Where to store this workers results.
	resultsManager results.Manager
	// probeManager is the owning manager; used for cleanup on exit.
	probeManager *manager
	// The last probe result for this worker.
	lastResult results.Result
	// How many times in a row the probe has returned the same result.
	resultRun int
	// Per-result label sets attached to this worker for the ProberResults
	// metric ("result" differs between them; other labels are shared).
	proberResultsSuccessfulMetricLabels metrics.Labels
	proberResultsFailedMetricLabels     metrics.Labels
	proberResultsUnknownMetricLabels    metrics.Labels
}
// newWorker creates a probe worker for the given endpoint; the caller is
// expected to start it with `go w.run()`.
func newWorker(
	m *manager,
	thirdComponent *v1alpha1.ThirdComponent,
	endpoint v1alpha1.ThirdComponentEndpointStatus) *worker {

	w := &worker{
		stopCh: make(chan struct{}, 1), // Buffer so stop() can be non-blocking.
		probeManager: m,
		thirdComponent: thirdComponent,
		endpoint: endpoint,
	}

	w.spec = thirdComponent.Spec.Probe
	w.resultsManager = m.readinessManager
	w.initialValue = results.Failure

	// Label names must match the ProberResults CounterVec declaration
	// ("result", "endpoint", "thirdcomponent", "namespace"); the previous
	// "pod" key was not part of that set, so metric observations with
	// these labels could never match the vector.
	basicMetricLabels := metrics.Labels{
		"endpoint": string(w.endpoint.Address),
		"thirdcomponent": w.thirdComponent.Name,
		"namespace": w.thirdComponent.Namespace,
	}

	w.proberResultsSuccessfulMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)
	w.proberResultsSuccessfulMetricLabels["result"] = probeResultSuccessful

	w.proberResultsFailedMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)
	w.proberResultsFailedMetricLabels["result"] = probeResultFailed

	w.proberResultsUnknownMetricLabels = deepCopyPrometheusLabels(basicMetricLabels)
	w.proberResultsUnknownMetricLabels["result"] = probeResultUnknown

	return w
}
// run periodically probes the endpoint until stopped, cleaning up its
// cached result, manager registration, and metric series on exit.
func (w *worker) run() {
	logrus.Infof("start prober worker %s", w.thirdComponent.GetEndpointID(&w.endpoint))
	probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second
	// Workers may be (re)started in rapid succession. Sleep a random
	// portion of the period first so probes don't all fire in lockstep.
	time.Sleep(time.Duration(rand.Float64() * float64(probeTickerPeriod)))
	probeTicker := time.NewTicker(probeTickerPeriod)
	defer func() {
		// Clean up.
		probeTicker.Stop()
		w.resultsManager.Remove(w.thirdComponent.GetEndpointID(&w.endpoint))
		w.probeManager.removeWorker(&w.endpoint)
		ProberResults.Delete(w.proberResultsSuccessfulMetricLabels)
		ProberResults.Delete(w.proberResultsFailedMetricLabels)
		ProberResults.Delete(w.proberResultsUnknownMetricLabels)
	}()
probeLoop:
	for w.doProbe() {
		// Wait for next probe tick.
		select {
		case <-w.stopCh:
			break probeLoop
		case <-probeTicker.C:
			// continue
		}
	}
}
// stop stops the probe worker. The worker handles cleanup and removes itself from its manager.
// It is safe to call stop multiple times: stopCh has capacity 1, so once a
// signal is pending further sends fall through to the default case.
func (w *worker) stop() {
	select {
	case w.stopCh <- struct{}{}:
	default: // Non-blocking.
	}
}
// doProbe probes the endpoint once and records the result.
// Returns whether the worker should continue.
func (w *worker) doProbe() (keepGoing bool) {
	defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging)
	defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true })

	result, err := w.probeManager.prober.probe(w.thirdComponent, &w.endpoint, w.thirdComponent.GetEndpointID(&w.endpoint))
	if err != nil {
		// Prober error, throw away the result.
		return true
	}

	// Count the observation against the matching result label set.
	switch result {
	case results.Success:
		ProberResults.With(w.proberResultsSuccessfulMetricLabels).Inc()
	case results.Failure:
		ProberResults.With(w.proberResultsFailedMetricLabels).Inc()
	default:
		ProberResults.With(w.proberResultsUnknownMetricLabels).Inc()
	}

	// Track how many consecutive times the same result has been observed.
	if w.lastResult == result {
		w.resultRun++
	} else {
		w.lastResult = result
		w.resultRun = 1
	}

	if (result == results.Failure && w.resultRun < int(w.spec.FailureThreshold)) ||
		(result == results.Success && w.resultRun < int(w.spec.SuccessThreshold)) {
		// Success or failure is below threshold - leave the probe state unchanged.
		return true
	}

	// Threshold reached: publish the result (may emit an Update).
	w.resultsManager.Set(w.thirdComponent.GetEndpointID(&w.endpoint), result)
	return true
}
// deepCopyPrometheusLabels returns an independent copy of the label map so
// per-result mutations don't leak into the shared base labels.
func deepCopyPrometheusLabels(m metrics.Labels) metrics.Labels {
	out := make(metrics.Labels, len(m))
	for key, value := range m {
		out[key] = value
	}
	return out
}

View File

@ -0,0 +1,65 @@
package thirdcomponent
import (
"context"
"github.com/goodrain/rainbond/pkg/apis/rainbond/v1alpha1"
dis "github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/discover"
"github.com/goodrain/rainbond/worker/master/controller/thirdcomponent/prober"
"github.com/sirupsen/logrus"
)
// Worker drives endpoint discovery for a single third component and owns
// the optional prober manager used for static endpoints.
type Worker struct {
	discover dis.Discover
	cancel context.CancelFunc
	ctx context.Context
	// updateChan receives components whose endpoint status changed.
	updateChan chan *v1alpha1.ThirdComponent
	// stoped reports whether the discover loop has exited.
	stoped bool
	// proberManager is set when the component uses static endpoints;
	// it may be nil otherwise.
	proberManager prober.Manager
}
// Start runs the discover loop until the worker's context is canceled,
// marking the worker stopped and shutting down the prober manager (if any)
// on the way out.
func (w *Worker) Start() {
	defer func() {
		// Fixed log typo: "stoed" -> "stopped".
		logrus.Infof("discover endpoint list worker %s/%s stopped", w.discover.GetComponent().Namespace, w.discover.GetComponent().Name)
		w.stoped = true
		if w.proberManager != nil {
			w.proberManager.Stop()
		}
	}()
	w.stoped = false
	logrus.Infof("discover endpoint list worker %s/%s started", w.discover.GetComponent().Namespace, w.discover.GetComponent().Name)
	for {
		w.discover.Discover(w.ctx, w.updateChan)
		select {
		case <-w.ctx.Done():
			return
		default:
		}
	}
}
// UpdateDiscover swaps in a new discover instance. For static-endpoint
// components it re-registers the component with the prober manager so
// probe workers track the latest endpoint list.
func (w *Worker) UpdateDiscover(discover dis.Discover) {
	component := discover.GetComponent()
	// Guard against a nil proberManager: workers created for non-static
	// components never get one, and calling methods on a nil interface
	// would panic if the component later switched to static endpoints.
	if component.Spec.IsStaticEndpoints() && w.proberManager != nil {
		w.proberManager.AddThirdComponent(component)
		discover.SetProberManager(w.proberManager)
	}
	w.discover = discover
}
// Stop cancels the worker's context and shuts down the prober manager if
// one was created for this worker.
func (w *Worker) Stop() {
	w.cancel()
	if w.proberManager != nil {
		w.proberManager.Stop()
	}
}
// IsStop reports whether the discover loop has exited.
func (w *Worker) IsStop() bool {
	return w.stoped
}

File diff suppressed because it is too large Load Diff

View File

@ -133,16 +133,14 @@ message DelThirdPartyEndpointsReq {
}
message ThirdPartyEndpoint {
string uuid = 1;
string sid = 2;
string ip = 3;
int32 port = 4;
string status = 5;
bool is_online = 6;
string name = 1;
string componentID = 2;
string address = 3;
string status = 4;
}
message ThirdPartyEndpoints {
repeated ThirdPartyEndpoint obj = 1;
repeated ThirdPartyEndpoint items = 1;
}
message ListPodsBySIDReq {

Some files were not shown because too many files have changed in this diff Show More