upgrade kube sdk to 0.20.0 #931

This commit is contained in:
barnettZQG 2021-04-01 19:08:57 +08:00
parent 6a806a6466
commit 28be7c7d9c
60 changed files with 786 additions and 887 deletions

View File

@ -76,7 +76,7 @@ func ChargesVerifyController(w http.ResponseWriter, r *http.Request) {
}
if publicCloud := os.Getenv("PUBLIC_CLOUD"); publicCloud != "true" {
err := cloud.PriChargeSverify(tenant, quantityInt)
err := cloud.PriChargeSverify(r.Context(), tenant, quantityInt)
if err != nil {
err.Handle(r, w)
return

View File

@ -34,7 +34,7 @@ type ClusterController struct {
// GetClusterInfo -
func (t *ClusterController) GetClusterInfo(w http.ResponseWriter, r *http.Request) {
nodes, err := handler.GetClusterHandler().GetClusterInfo()
nodes, err := handler.GetClusterHandler().GetClusterInfo(r.Context())
if err != nil {
logrus.Errorf("get cluster info: %v", err)
httputil.ReturnError(r, w, 500, err.Error())
@ -46,7 +46,7 @@ func (t *ClusterController) GetClusterInfo(w http.ResponseWriter, r *http.Reques
//MavenSettingList maven setting list
func (t *ClusterController) MavenSettingList(w http.ResponseWriter, r *http.Request) {
httputil.ReturnSuccess(r, w, handler.GetClusterHandler().MavenSettingList())
httputil.ReturnSuccess(r, w, handler.GetClusterHandler().MavenSettingList(r.Context()))
}
//MavenSettingAdd maven setting add
@ -55,7 +55,7 @@ func (t *ClusterController) MavenSettingAdd(w http.ResponseWriter, r *http.Reque
if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &set, nil); !ok {
return
}
if err := handler.GetClusterHandler().MavenSettingAdd(&set); err != nil {
if err := handler.GetClusterHandler().MavenSettingAdd(r.Context(), &set); err != nil {
err.Handle(r, w)
return
}
@ -75,7 +75,7 @@ func (t *ClusterController) MavenSettingUpdate(w http.ResponseWriter, r *http.Re
Name: chi.URLParam(r, "name"),
Content: su.Content,
}
if err := handler.GetClusterHandler().MavenSettingUpdate(set); err != nil {
if err := handler.GetClusterHandler().MavenSettingUpdate(r.Context(), set); err != nil {
err.Handle(r, w)
return
}
@ -84,7 +84,7 @@ func (t *ClusterController) MavenSettingUpdate(w http.ResponseWriter, r *http.Re
//MavenSettingDelete maven setting file delete
func (t *ClusterController) MavenSettingDelete(w http.ResponseWriter, r *http.Request) {
err := handler.GetClusterHandler().MavenSettingDelete(chi.URLParam(r, "name"))
err := handler.GetClusterHandler().MavenSettingDelete(r.Context(), chi.URLParam(r, "name"))
if err != nil {
err.Handle(r, w)
return
@ -94,7 +94,7 @@ func (t *ClusterController) MavenSettingDelete(w http.ResponseWriter, r *http.Re
//MavenSettingDetail maven setting file detail
func (t *ClusterController) MavenSettingDetail(w http.ResponseWriter, r *http.Request) {
c, err := handler.GetClusterHandler().MavenSettingDetail(chi.URLParam(r, "name"))
c, err := handler.GetClusterHandler().MavenSettingDetail(r.Context(), chi.URLParam(r, "name"))
if err != nil {
err.Handle(r, w)
return

View File

@ -161,7 +161,7 @@ func (t *TenantStruct) TenantResources(w http.ResponseWriter, r *http.Request) {
return
}
rep, err := handler.GetTenantManager().GetTenantsResources(&tr)
rep, err := handler.GetTenantManager().GetTenantsResources(r.Context(), &tr)
if err != nil {
httputil.ReturnError(r, w, 500, fmt.Sprintf("get resources error, %v", err))
return

View File

@ -67,7 +67,7 @@ func (t *TenantStruct) StartService(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(middleware.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(tenant, service.Replicas*service.ContainerMemory); err != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, err.Error())
return
}
@ -167,7 +167,7 @@ func (t *TenantStruct) RestartService(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(middleware.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(tenant, service.Replicas*service.ContainerMemory); err != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, err.Error())
return
}
@ -218,7 +218,7 @@ func (t *TenantStruct) VerticalService(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(middleware.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(tenant, service.Replicas*mem); err != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*mem); err != nil {
httputil.ReturnResNotEnough(r, w, err.Error())
return
}
@ -273,7 +273,7 @@ func (t *TenantStruct) HorizontalService(w http.ResponseWriter, r *http.Request)
tenant := r.Context().Value(middleware.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(tenant, service.ContainerMemory*int(replicas)); err != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.ContainerMemory*int(replicas)); err != nil {
httputil.ReturnResNotEnough(r, w, err.Error())
return
}
@ -331,7 +331,7 @@ func (t *TenantStruct) BuildService(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(middleware.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(tenant, service.Replicas*service.ContainerMemory); err != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, err.Error())
return
}
@ -547,7 +547,7 @@ func (t *TenantStruct) UpgradeService(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(middleware.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(tenant, service.Replicas*service.ContainerMemory); err != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, err.Error())
return
}
@ -633,7 +633,7 @@ func (t *TenantStruct) RollBack(w http.ResponseWriter, r *http.Request) {
tenant := r.Context().Value(middleware.ContextKey("tenant")).(*dbmodel.Tenants)
service := r.Context().Value(middleware.ContextKey("service")).(*dbmodel.TenantServices)
if err := handler.CheckTenantResource(tenant, service.Replicas*service.ContainerMemory); err != nil {
if err := handler.CheckTenantResource(r.Context(), tenant, service.Replicas*service.ContainerMemory); err != nil {
httputil.ReturnResNotEnough(r, w, err.Error())
return
}

View File

@ -19,6 +19,7 @@
package cloud
import (
"context"
"fmt"
"io/ioutil"
"net/http"
@ -70,14 +71,14 @@ func PubChargeSverify(tenant *model.Tenants, quantity int, reason string) *util.
}
// PriChargeSverify verifies that the resources requested in the private cloud are legal
func PriChargeSverify(tenant *model.Tenants, quantity int) *util.APIHandleError {
func PriChargeSverify(ctx context.Context, tenant *model.Tenants, quantity int) *util.APIHandleError {
t, err := db.GetManager().TenantDao().GetTenantByUUID(tenant.UUID)
if err != nil {
logrus.Errorf("error getting tenant: %v", err)
return util.CreateAPIHandleError(500, fmt.Errorf("error getting tenant: %v", err))
}
if t.LimitMemory == 0 {
clusterStats, err := handler.GetTenantManager().GetAllocatableResources()
clusterStats, err := handler.GetTenantManager().GetAllocatableResources(ctx)
if err != nil {
logrus.Errorf("error getting allocatable resources: %v", err)
return util.CreateAPIHandleError(500, fmt.Errorf("error getting allocatable resources: %v", err))

View File

@ -1,6 +1,7 @@
package handler
import (
"context"
"fmt"
"os"
"runtime"
@ -20,12 +21,12 @@ import (
// ClusterHandler -
type ClusterHandler interface {
GetClusterInfo() (*model.ClusterResource, error)
MavenSettingAdd(ms *MavenSetting) *util.APIHandleError
MavenSettingList() (re []MavenSetting)
MavenSettingUpdate(ms *MavenSetting) *util.APIHandleError
MavenSettingDelete(name string) *util.APIHandleError
MavenSettingDetail(name string) (*MavenSetting, *util.APIHandleError)
GetClusterInfo(ctx context.Context) (*model.ClusterResource, error)
MavenSettingAdd(ctx context.Context, ms *MavenSetting) *util.APIHandleError
MavenSettingList(ctx context.Context) (re []MavenSetting)
MavenSettingUpdate(ctx context.Context, ms *MavenSetting) *util.APIHandleError
MavenSettingDelete(ctx context.Context, name string) *util.APIHandleError
MavenSettingDetail(ctx context.Context, name string) (*MavenSetting, *util.APIHandleError)
}
// NewClusterHandler -
@ -43,7 +44,7 @@ type clusterAction struct {
cacheTime time.Time
}
func (c *clusterAction) GetClusterInfo() (*model.ClusterResource, error) {
func (c *clusterAction) GetClusterInfo(ctx context.Context) (*model.ClusterResource, error) {
timeout, _ := strconv.Atoi(os.Getenv("CLUSTER_INFO_CACHE_TIME"))
if timeout == 0 {
// default is 30 seconds
@ -56,7 +57,7 @@ func (c *clusterAction) GetClusterInfo() (*model.ClusterResource, error) {
logrus.Debugf("cluster info cache is timeout, will calculate a new value")
}
nodes, err := c.listNodes()
nodes, err := c.listNodes(ctx)
if err != nil {
return nil, fmt.Errorf("[GetClusterInfo] list nodes: %v", err)
}
@ -85,7 +86,7 @@ func (c *clusterAction) GetClusterInfo() (*model.ClusterResource, error) {
for i := range usedNodeList {
node := usedNodeList[i]
pods, err := c.listPods(node.Name)
pods, err := c.listPods(ctx, node.Name)
if err != nil {
return nil, fmt.Errorf("list pods: %v", err)
}
@ -166,9 +167,9 @@ func (c *clusterAction) GetClusterInfo() (*model.ClusterResource, error) {
return result, nil
}
func (c *clusterAction) listNodes() ([]*corev1.Node, error) {
func (c *clusterAction) listNodes(ctx context.Context) ([]*corev1.Node, error) {
opts := metav1.ListOptions{}
nodeList, err := c.clientset.CoreV1().Nodes().List(opts)
nodeList, err := c.clientset.CoreV1().Nodes().List(ctx, opts)
if err != nil {
return nil, err
}
@ -206,8 +207,8 @@ func containsTaints(node *corev1.Node) bool {
return false
}
func (c *clusterAction) listPods(nodeName string) (pods []corev1.Pod, err error) {
podList, err := c.clientset.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
func (c *clusterAction) listPods(ctx context.Context, nodeName string) (pods []corev1.Pod, err error) {
podList, err := c.clientset.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{
FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
if err != nil {
return pods, err
@ -225,8 +226,8 @@ type MavenSetting struct {
}
//MavenSettingList maven setting list
func (c *clusterAction) MavenSettingList() (re []MavenSetting) {
cms, err := c.clientset.CoreV1().ConfigMaps(c.namespace).List(metav1.ListOptions{
func (c *clusterAction) MavenSettingList(ctx context.Context) (re []MavenSetting) {
cms, err := c.clientset.CoreV1().ConfigMaps(c.namespace).List(ctx, metav1.ListOptions{
LabelSelector: "configtype=mavensetting",
})
if err != nil {
@ -244,7 +245,7 @@ func (c *clusterAction) MavenSettingList() (re []MavenSetting) {
}
//MavenSettingAdd maven setting add
func (c *clusterAction) MavenSettingAdd(ms *MavenSetting) *util.APIHandleError {
func (c *clusterAction) MavenSettingAdd(ctx context.Context, ms *MavenSetting) *util.APIHandleError {
config := &corev1.ConfigMap{}
config.Name = ms.Name
config.Namespace = c.namespace
@ -258,7 +259,7 @@ func (c *clusterAction) MavenSettingAdd(ms *MavenSetting) *util.APIHandleError {
config.Data = map[string]string{
"mavensetting": ms.Content,
}
_, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Create(config)
_, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Create(ctx, config, metav1.CreateOptions{})
if err != nil {
if apierrors.IsAlreadyExists(err) {
return &util.APIHandleError{Code: 400, Err: fmt.Errorf("setting name is exist")}
@ -272,8 +273,8 @@ func (c *clusterAction) MavenSettingAdd(ms *MavenSetting) *util.APIHandleError {
}
//MavenSettingUpdate maven setting file update
func (c *clusterAction) MavenSettingUpdate(ms *MavenSetting) *util.APIHandleError {
sm, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Get(ms.Name, metav1.GetOptions{})
func (c *clusterAction) MavenSettingUpdate(ctx context.Context, ms *MavenSetting) *util.APIHandleError {
sm, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Get(ctx, ms.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return &util.APIHandleError{Code: 404, Err: fmt.Errorf("setting name is not exist")}
@ -289,7 +290,7 @@ func (c *clusterAction) MavenSettingUpdate(ms *MavenSetting) *util.APIHandleErro
}
sm.Data["mavensetting"] = ms.Content
sm.Annotations["updateTime"] = time.Now().Format(time.RFC3339)
if _, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Update(sm); err != nil {
if _, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Update(ctx, sm, metav1.UpdateOptions{}); err != nil {
logrus.Errorf("update maven setting configmap failure %s", err.Error())
return &util.APIHandleError{Code: 500, Err: fmt.Errorf("update setting config failure")}
}
@ -299,8 +300,8 @@ func (c *clusterAction) MavenSettingUpdate(ms *MavenSetting) *util.APIHandleErro
}
//MavenSettingDelete maven setting file delete
func (c *clusterAction) MavenSettingDelete(name string) *util.APIHandleError {
err := c.clientset.CoreV1().ConfigMaps(c.namespace).Delete(name, &metav1.DeleteOptions{})
func (c *clusterAction) MavenSettingDelete(ctx context.Context, name string) *util.APIHandleError {
err := c.clientset.CoreV1().ConfigMaps(c.namespace).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return &util.APIHandleError{Code: 404, Err: fmt.Errorf("setting not found")}
@ -312,8 +313,8 @@ func (c *clusterAction) MavenSettingDelete(name string) *util.APIHandleError {
}
//MavenSettingDetail maven setting file detail
func (c *clusterAction) MavenSettingDetail(name string) (*MavenSetting, *util.APIHandleError) {
sm, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Get(name, metav1.GetOptions{})
func (c *clusterAction) MavenSettingDetail(ctx context.Context, name string) (*MavenSetting, *util.APIHandleError) {
sm, err := c.clientset.CoreV1().ConfigMaps(c.namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
logrus.Errorf("get maven setting config failure %s", err.Error())
return nil, &util.APIHandleError{Code: 404, Err: fmt.Errorf("setting not found")}

View File

@ -19,10 +19,13 @@
package handler
import (
"context"
"fmt"
"strings"
"time"
"container/list"
"github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/db"
gclient "github.com/goodrain/rainbond/mq/client"
@ -67,8 +70,9 @@ func checkResourceEnough(serviceID string) error {
logrus.Errorf("get tenant by id error: %v", err)
return err
}
return CheckTenantResource(tenant, service.ContainerMemory*service.Replicas)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
return CheckTenantResource(ctx, tenant, service.ContainerMemory*service.Replicas)
}
func (b *BatchOperationHandler) serviceStartupSequence(serviceIDs []string) map[string][]string {

View File

@ -261,7 +261,7 @@ type QueryResult struct {
}
//GetTenantsResources Gets the resource usage of the specified tenant.
func (t *TenantAction) GetTenantsResources(tr *api_model.TenantResources) (map[string]map[string]interface{}, error) {
func (t *TenantAction) GetTenantsResources(ctx context.Context, tr *api_model.TenantResources) (map[string]map[string]interface{}, error) {
ids, err := db.GetManager().TenantDao().GetTenantIDsByNames(tr.Body.TenantNames)
if err != nil {
return nil, err
@ -279,7 +279,7 @@ func (t *TenantAction) GetTenantsResources(tr *api_model.TenantResources) (map[s
serviceTenantCount[s.TenantID]++
}
// get cluster resources
clusterStats, err := t.GetAllocatableResources()
clusterStats, err := t.GetAllocatableResources(ctx)
if err != nil {
return nil, fmt.Errorf("error getting allocatalbe cpu and memory: %v", err)
}
@ -379,10 +379,10 @@ type ClusterResourceStats struct {
RequestMemory int64
}
func (t *TenantAction) initClusterResource() error {
func (t *TenantAction) initClusterResource(ctx context.Context) error {
if t.cacheClusterResourceStats == nil || t.cacheTime.Add(time.Minute*3).Before(time.Now()) {
var crs ClusterResourceStats
nodes, err := t.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := t.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
logrus.Errorf("get cluster nodes failure %s", err.Error())
return err
@ -411,9 +411,9 @@ func (t *TenantAction) initClusterResource() error {
}
// GetAllocatableResources returns allocatable cpu and memory (MB)
func (t *TenantAction) GetAllocatableResources() (*ClusterResourceStats, error) {
func (t *TenantAction) GetAllocatableResources(ctx context.Context) (*ClusterResourceStats, error) {
var crs ClusterResourceStats
if t.initClusterResource() != nil {
if t.initClusterResource(ctx) != nil {
return &crs, nil
}
ts, err := t.statusCli.GetAllTenantResource()
@ -582,8 +582,8 @@ func (t *TenantAction) IsClosedStatus(status string) bool {
}
//GetClusterResource get cluster resource
func (t *TenantAction) GetClusterResource() *ClusterResourceStats {
if t.initClusterResource() != nil {
func (t *TenantAction) GetClusterResource(ctx context.Context) *ClusterResourceStats {
if t.initClusterResource(ctx) != nil {
return nil
}
return t.cacheClusterResourceStats

View File

@ -19,6 +19,8 @@
package handler
import (
"context"
api_model "github.com/goodrain/rainbond/api/model"
"github.com/goodrain/rainbond/api/util"
dbmodel "github.com/goodrain/rainbond/db/model"
@ -33,9 +35,9 @@ type TenantHandler interface {
GetTenantsName() ([]string, error)
StatsMemCPU(services []*dbmodel.TenantServices) (*api_model.StatsInfo, error)
TotalMemCPU(services []*dbmodel.TenantServices) (*api_model.StatsInfo, error)
GetTenantsResources(tr *api_model.TenantResources) (map[string]map[string]interface{}, error)
GetTenantsResources(ctx context.Context, tr *api_model.TenantResources) (map[string]map[string]interface{}, error)
GetTenantResource(tenantID string) (TenantResourceStats, error)
GetAllocatableResources() (*ClusterResourceStats, error)
GetAllocatableResources(ctx context.Context) (*ClusterResourceStats, error)
GetServicesResources(tr *api_model.ServicesResources) (map[string]map[string]interface{}, error)
TenantsSum() (int, error)
GetProtocols() ([]*dbmodel.RegionProcotols, *util.APIHandleError)
@ -45,5 +47,5 @@ type TenantHandler interface {
BindTenantsResource(source []*dbmodel.Tenants) api_model.TenantList
UpdateTenant(*dbmodel.Tenants) error
DeleteTenant(tenantID string) error
GetClusterResource() *ClusterResourceStats
GetClusterResource(ctx context.Context) *ClusterResourceStats
}

View File

@ -1,13 +1,15 @@
package handler
import (
"context"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// CheckTenantResource check tenant's resource is support action or not
func CheckTenantResource(tenant *dbmodel.Tenants, needMemory int) error {
func CheckTenantResource(ctx context.Context, tenant *dbmodel.Tenants, needMemory int) error {
ts, err := GetServiceManager().GetTenantRes(tenant.UUID)
if err != nil {
return err
@ -20,7 +22,7 @@ func CheckTenantResource(tenant *dbmodel.Tenants, needMemory int) error {
return errors.New("tenant_lack_of_memory")
}
}
clusterInfo, err := GetTenantManager().GetAllocatableResources()
clusterInfo, err := GetTenantManager().GetAllocatableResources(ctx)
if err != nil {
logrus.Errorf("get cluster resources failure for check tenant resource: %v", err.Error())
}

View File

@ -19,7 +19,9 @@
package metric
import (
"context"
"fmt"
"time"
"github.com/goodrain/rainbond/api/handler"
"github.com/prometheus/client_golang/prometheus"
@ -102,7 +104,9 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.tenantLimit.WithLabelValues(t.UUID, t.UUID).Set(float64(t.LimitMemory))
}
// cluster memory
resource := handler.GetTenantManager().GetClusterResource()
ctx, cancel := context.WithTimeout(context.Background(), time.Second*15)
defer cancel()
resource := handler.GetTenantManager().GetClusterResource(ctx)
if resource != nil {
e.clusterMemoryTotal.Set(float64(resource.AllMemory))
e.clusterCPUTotal.Set(float64(resource.AllCPU))

View File

@ -440,12 +440,12 @@ func (s *slugBuild) runBuildJob(re *Request) error {
//set maven setting
var mavenSettingConfigName string
if mavenSettingName != "" && re.Lang.String() == code.JavaMaven.String() {
if setting := jobc.GetJobController().GetLanguageBuildSetting(code.JavaMaven, mavenSettingName); setting != "" {
if setting := jobc.GetJobController().GetLanguageBuildSetting(re.Ctx, code.JavaMaven, mavenSettingName); setting != "" {
mavenSettingConfigName = setting
} else {
logrus.Warnf("maven setting config %s not found", mavenSettingName)
}
} else if settingName := jobc.GetJobController().GetDefaultLanguageBuildSetting(code.JavaMaven); settingName != "" {
} else if settingName := jobc.GetJobController().GetDefaultLanguageBuildSetting(re.Ctx, code.JavaMaven); settingName != "" {
mavenSettingConfigName = settingName
}
if mavenSettingConfigName != "" {

View File

@ -266,7 +266,7 @@ func (i *SourceCodeBuildItem) codeBuild() (*build.Response, error) {
}
func (i *SourceCodeBuildItem) getExtraHosts() (extraHosts []string, err error) {
endpoints, err := i.KubeClient.CoreV1().Endpoints(i.RbdNamespace).Get(i.RbdRepoName, metav1.GetOptions{})
endpoints, err := i.KubeClient.CoreV1().Endpoints(i.RbdNamespace).Get(context.Background(), i.RbdRepoName, metav1.GetOptions{})
if err != nil {
logrus.Errorf("do not found ep by name: %s in namespace: %s", i.RbdRepoName, i.Namespace)
return nil, err
@ -281,7 +281,7 @@ func (i *SourceCodeBuildItem) getExtraHosts() (extraHosts []string, err error) {
}
func (i *SourceCodeBuildItem) getHostAlias() (hostAliasList []build.HostAlias, err error) {
endpoints, err := i.KubeClient.CoreV1().Endpoints(i.RbdNamespace).Get(i.RbdRepoName, metav1.GetOptions{})
endpoints, err := i.KubeClient.CoreV1().Endpoints(i.RbdNamespace).Get(context.Background(), i.RbdRepoName, metav1.GetOptions{})
if err != nil {
logrus.Errorf("do not found ep by name: %s in namespace: %s", i.RbdRepoName, i.Namespace)
return nil, err

View File

@ -44,8 +44,8 @@ type Controller interface {
GetJob(string) (*corev1.Pod, error)
GetServiceJobs(serviceID string) ([]*corev1.Pod, error)
DeleteJob(job string)
GetLanguageBuildSetting(lang code.Lang, name string) string
GetDefaultLanguageBuildSetting(lang code.Lang) string
GetLanguageBuildSetting(ctx context.Context, lang code.Lang, name string) string
GetDefaultLanguageBuildSetting(ctx context.Context, lang code.Lang) string
}
type controller struct {
KubeClient kubernetes.Interface
@ -163,7 +163,7 @@ func (c *controller) ExecJob(ctx context.Context, job *corev1.Pod, logger io.Wri
c.subJobStatus.Store(job.Name, result)
return nil
}
_, err := c.KubeClient.CoreV1().Pods(c.namespace).Create(job)
_, err := c.KubeClient.CoreV1().Pods(c.namespace).Create(ctx, job, metav1.CreateOptions{})
if err != nil {
return err
}
@ -194,7 +194,7 @@ func (c *controller) getLogger(ctx context.Context, job string, writer io.Writer
// reader log just only do once, if complete, exit this func
logrus.Debugf("job[%s] container is ready, start get log stream", job)
podLogRequest := c.KubeClient.CoreV1().Pods(c.namespace).GetLogs(job, &corev1.PodLogOptions{Follow: true})
reader, err := podLogRequest.Stream()
reader, err := podLogRequest.Stream(ctx)
if err != nil {
logrus.Warnf("get build job pod log data error: %s", err.Error())
return
@ -222,7 +222,7 @@ func (c *controller) DeleteJob(job string) {
namespace := c.namespace
logrus.Debugf("start delete job: %s", job)
// delete job
if err := c.KubeClient.CoreV1().Pods(namespace).Delete(job, &metav1.DeleteOptions{}); err != nil {
if err := c.KubeClient.CoreV1().Pods(namespace).Delete(context.Background(), job, metav1.DeleteOptions{}); err != nil {
if !k8sErrors.IsNotFound(err) {
logrus.Errorf("delete job failed: %s", err.Error())
}
@ -232,8 +232,8 @@ func (c *controller) DeleteJob(job string) {
logrus.Infof("delete job %s finish", job)
}
func (c *controller) GetLanguageBuildSetting(lang code.Lang, name string) string {
config, err := c.KubeClient.CoreV1().ConfigMaps(c.namespace).Get(name, metav1.GetOptions{})
func (c *controller) GetLanguageBuildSetting(ctx context.Context, lang code.Lang, name string) string {
config, err := c.KubeClient.CoreV1().ConfigMaps(c.namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
logrus.Errorf("get configmap %s failure %s", name, err.Error())
}
@ -243,8 +243,8 @@ func (c *controller) GetLanguageBuildSetting(lang code.Lang, name string) string
return ""
}
func (c *controller) GetDefaultLanguageBuildSetting(lang code.Lang) string {
config, err := c.KubeClient.CoreV1().ConfigMaps(c.namespace).List(metav1.ListOptions{
func (c *controller) GetDefaultLanguageBuildSetting(ctx context.Context, lang code.Lang) string {
config, err := c.KubeClient.CoreV1().ConfigMaps(c.namespace).List(ctx, metav1.ListOptions{
LabelSelector: "default=true",
})
if err != nil {

View File

@ -141,7 +141,7 @@ func (k *k8sDiscover) rewatchWithErr(name string, callback CallbackUpdate, err e
}
func (k *k8sDiscover) list(name string) []*config.Endpoint {
podList, err := k.clientset.CoreV1().Pods(k.cfg.RbdNamespace).List(metav1.ListOptions{
podList, err := k.clientset.CoreV1().Pods(k.cfg.RbdNamespace).List(context.Background(), metav1.ListOptions{
LabelSelector: "name=" + name,
})
if err != nil {

View File

@ -3,12 +3,13 @@ package discover
import (
"context"
"fmt"
"testing"
"time"
"github.com/goodrain/rainbond/cmd/node/option"
"github.com/goodrain/rainbond/discover/config"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -76,14 +77,14 @@ func TestK8sDiscover_AddProject(t *testing.T) {
Status: corev1.ConditionTrue,
})
pod.Status.PodIP = "172.20.0.50"
_, err := clientset.CoreV1().Pods("").Update(pod)
_, err := clientset.CoreV1().Pods("").Update(context.Background(), pod, metav1.UpdateOptions{})
if err != nil {
t.Error(err)
}
time.Sleep(1 * time.Second)
err = clientset.CoreV1().Pods("").Delete(pod.Name, &metav1.DeleteOptions{})
err = clientset.CoreV1().Pods("").Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
if err != nil {
t.Error(err)
}

View File

@ -19,12 +19,15 @@
package http
import (
"context"
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/controller"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
extensions "k8s.io/api/extensions/v1beta1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
api_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -373,7 +376,7 @@ func Test_ListIngress(t *testing.T) {
t.Errorf("can't create Kubernetes's client: %v", err)
}
ings, err := clientSet.ExtensionsV1beta1().Ingresses("gateway").List(api_meta_v1.ListOptions{})
ings, err := clientSet.ExtensionsV1beta1().Ingresses("gateway").List(context.TODO(), api_meta_v1.ListOptions{})
if err != nil {
t.Fatalf("error listing ingresses: %v", err)
}
@ -490,13 +493,13 @@ func TestHttpUpstreamHashBy(t *testing.T) {
func ensureNamespace(ns *corev1.Namespace, clientSet kubernetes.Interface, t *testing.T) *corev1.Namespace {
t.Helper()
n, err := clientSet.CoreV1().Namespaces().Update(ns)
n, err := clientSet.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Namespace %v not found, creating", ns)
n, err = clientSet.CoreV1().Namespaces().Create(ns)
n, err = clientSet.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating namespace %+v: %v", ns, err)
}
@ -515,13 +518,13 @@ func ensureNamespace(ns *corev1.Namespace, clientSet kubernetes.Interface, t *te
func ensureDeploy(deploy *v1beta1.Deployment, clientSet kubernetes.Interface, t *testing.T) *v1beta1.Deployment {
t.Helper()
dm, err := clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Update(deploy)
dm, err := clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Deployment %v not found, creating", deploy)
dm, err = clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Create(deploy)
dm, err = clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating deployment %+v: %v", deploy, err)
}
@ -540,9 +543,9 @@ func ensureDeploy(deploy *v1beta1.Deployment, clientSet kubernetes.Interface, t
func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *testing.T) *corev1.Service {
t.Helper()
clientSet.CoreV1().Services(service.Namespace).Delete(service.Name, &metav1.DeleteOptions{})
clientSet.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
svc, err := clientSet.CoreV1().Services(service.Namespace).Create(service)
svc, err := clientSet.CoreV1().Services(service.Namespace).Create(context.TODO(), service, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating service %+v: %v", service, err)
}
@ -554,13 +557,13 @@ func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *t
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) *extensions.Ingress {
t.Helper()
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress)
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Ingress %v not found, creating", ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating ingress %+v: %v", ingress, err)
}

View File

@ -19,6 +19,10 @@
package https
import (
"context"
"testing"
"time"
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/controller"
corev1 "k8s.io/api/core/v1"
@ -29,8 +33,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"testing"
"time"
)
const (
@ -212,13 +214,13 @@ func TestHttps(t *testing.T) {
func ensureNamespace(ns *corev1.Namespace, clientSet kubernetes.Interface, t *testing.T) *corev1.Namespace {
t.Helper()
n, err := clientSet.CoreV1().Namespaces().Update(ns)
n, err := clientSet.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Namespace %v not found, creating", ns)
n, err = clientSet.CoreV1().Namespaces().Create(ns)
n, err = clientSet.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating namespace %+v: %v", ns, err)
}
@ -237,13 +239,13 @@ func ensureNamespace(ns *corev1.Namespace, clientSet kubernetes.Interface, t *te
func ensureDeploy(deploy *v1beta1.Deployment, clientSet kubernetes.Interface, t *testing.T) *v1beta1.Deployment {
t.Helper()
dm, err := clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Update(deploy)
dm, err := clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Deployment %v not found, creating", deploy)
dm, err = clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Create(deploy)
dm, err = clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating deployment %+v: %v", deploy, err)
}
@ -262,9 +264,9 @@ func ensureDeploy(deploy *v1beta1.Deployment, clientSet kubernetes.Interface, t
func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *testing.T) *corev1.Service {
t.Helper()
clientSet.CoreV1().Services(service.Namespace).Delete(service.Name, &metav1.DeleteOptions{})
clientSet.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
svc, err := clientSet.CoreV1().Services(service.Namespace).Create(service)
svc, err := clientSet.CoreV1().Services(service.Namespace).Create(context.TODO(), service, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating service %+v: %v", service, err)
}
@ -276,13 +278,13 @@ func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *t
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) *extensions.Ingress {
t.Helper()
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress)
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Ingress %v not found, creating", ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating ingress %+v: %v", ingress, err)
}
@ -301,13 +303,13 @@ func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface,
func ensureSecret(service *corev1.Secret, clientSet kubernetes.Interface, t *testing.T) *corev1.Secret {
t.Helper()
serc, err := clientSet.CoreV1().Secrets(service.Namespace).Update(service)
serc, err := clientSet.CoreV1().Secrets(service.Namespace).Update(context.TODO(), service, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Secret %v not found, creating", service)
serc, err = clientSet.CoreV1().Secrets(service.Namespace).Create(service)
serc, err = clientSet.CoreV1().Secrets(service.Namespace).Create(context.TODO(), service, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating secret %+v: %v", service, err)
}

View File

@ -19,6 +19,7 @@
package rs
import (
"context"
"testing"
"github.com/goodrain/rainbond/gateway/controller"
@ -32,7 +33,7 @@ func TestReplicaSetTimestamp(t *testing.T) {
}
ns := "c1a29fe4d7b0413993dc859430cf743d"
rs, err := clientset.ExtensionsV1beta1().ReplicaSets(ns).Get("88d8c4c55657217522f3bb86cfbded7e-deployment-7545b75dbd", metav1.GetOptions{})
rs, err := clientset.ExtensionsV1beta1().ReplicaSets(ns).Get(context.TODO(), "88d8c4c55657217522f3bb86cfbded7e-deployment-7545b75dbd", metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error: %+v", err)
}

View File

@ -19,6 +19,10 @@
package tcp
import (
"context"
"testing"
"time"
"github.com/goodrain/rainbond/gateway/annotations/parser"
"github.com/goodrain/rainbond/gateway/controller"
corev1 "k8s.io/api/core/v1"
@ -29,8 +33,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"testing"
"time"
)
func TestTcp(t *testing.T) {
@ -130,13 +132,13 @@ func TestTcp(t *testing.T) {
func ensureNamespace(ns *corev1.Namespace, clientSet kubernetes.Interface, t *testing.T) *corev1.Namespace {
t.Helper()
n, err := clientSet.CoreV1().Namespaces().Update(ns)
n, err := clientSet.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Namespace %v not found, creating", ns)
n, err = clientSet.CoreV1().Namespaces().Create(ns)
n, err = clientSet.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating namespace %+v: %v", ns, err)
}
@ -155,13 +157,13 @@ func ensureNamespace(ns *corev1.Namespace, clientSet kubernetes.Interface, t *te
func ensureDeploy(deploy *v1beta1.Deployment, clientSet kubernetes.Interface, t *testing.T) *v1beta1.Deployment {
t.Helper()
dm, err := clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Update(deploy)
dm, err := clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Deployment %v not found, creating", deploy)
dm, err = clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Create(deploy)
dm, err = clientSet.ExtensionsV1beta1().Deployments(deploy.Namespace).Create(context.TODO(), deploy, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating deployment %+v: %v", deploy, err)
}
@ -180,9 +182,9 @@ func ensureDeploy(deploy *v1beta1.Deployment, clientSet kubernetes.Interface, t
func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *testing.T) *corev1.Service {
t.Helper()
clientSet.CoreV1().Services(service.Namespace).Delete(service.Name, &metav1.DeleteOptions{})
clientSet.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{})
svc, err := clientSet.CoreV1().Services(service.Namespace).Create(service)
svc, err := clientSet.CoreV1().Services(service.Namespace).Create(context.TODO(), service, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating service %+v: %v", service, err)
}
@ -193,13 +195,13 @@ func ensureService(service *corev1.Service, clientSet kubernetes.Interface, t *t
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface, t *testing.T) *extensions.Ingress {
t.Helper()
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress)
ing, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.TODO(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
t.Logf("Ingress %v not found, creating", ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress)
ing, err = clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), ingress, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating ingress %+v: %v", ingress, err)
}

90
go.mod
View File

@ -4,9 +4,11 @@ go 1.13
require (
github.com/DATA-DOG/go-sqlmock v1.3.3
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4 // indirect
github.com/aliyun/aliyun-oss-go-sdk v2.1.4+incompatible
github.com/aws/aws-sdk-go v1.34.17
github.com/aws/aws-sdk-go v1.36.15
github.com/barnettZQG/gotty v1.0.1-0.20200904091006-a0a1f7d747dc
github.com/beorn7/perks v1.0.1
github.com/bitly/go-simplejson v0.5.0
@ -14,11 +16,10 @@ require (
github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292 // indirect
github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c // indirect
github.com/coreos/etcd v3.3.17+incompatible
github.com/coreos/prometheus-operator v0.38.3
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/creack/pty v1.1.11 // indirect
github.com/docker/cli v0.0.0-20190711175710-5b38d82aa076
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v1.13.1
github.com/docker/docker v20.10.1+incompatible
github.com/docker/go-units v0.4.0
github.com/docker/libcompose v0.4.1-0.20190808084053-143e0f3f1ab9
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
@ -34,43 +35,58 @@ require (
github.com/go-chi/render v1.0.1
github.com/go-kit/kit v0.10.0
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/spec v0.19.14 // indirect
github.com/go-openapi/swag v0.19.12 // indirect
github.com/go-playground/assert/v2 v2.0.1
github.com/go-playground/validator/v10 v10.2.0
github.com/go-sql-driver/mysql v1.5.0
github.com/gogo/protobuf v1.3.1
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/mock v1.4.3
github.com/golang/protobuf v1.4.2
github.com/golang/mock v1.4.4
github.com/golang/protobuf v1.4.3
github.com/goodrain/rainbond-oam v0.0.0-20210206075623-511d0796af43
github.com/goodrain/rainbond-operator v1.0.0
github.com/google/go-cmp v0.5.4 // indirect
github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de // indirect
github.com/gorilla/mux v1.7.4 // indirect
github.com/gorilla/websocket v1.4.2
github.com/gosuri/uitable v0.0.4
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/howeyc/fsnotify v0.9.0
github.com/imdario/mergo v0.3.11
github.com/jinzhu/gorm v1.9.16
github.com/json-iterator/go v1.1.10
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/pty v1.1.8
github.com/lib/pq v1.3.0 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/mattn/go-runewidth v0.0.6
github.com/mattn/go-shellwords v1.0.10 // indirect
github.com/mitchellh/go-ps v1.0.0
github.com/mitchellh/go-wordwrap v1.0.0
github.com/mitchellh/mapstructure v1.3.3
github.com/ncabatoff/process-exporter v0.7.1
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/onsi/ginkgo v1.14.1 // indirect
github.com/onsi/gomega v1.10.2 // indirect
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb // indirect
github.com/pborman/uuid v1.2.1
github.com/pebbe/zmq4 v1.2.1
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.12.0
github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7
github.com/prometheus/client_golang v1.7.1
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.45.0
github.com/prometheus-operator/prometheus-operator/pkg/client v0.45.0
github.com/prometheus/client_golang v1.9.0
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.10.0
github.com/prometheus/common v0.15.0
github.com/prometheus/node_exporter v1.0.1
github.com/prometheus/procfs v0.1.3
github.com/prometheus/procfs v0.2.0
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
github.com/shirou/gopsutil v2.20.8+incompatible
github.com/sirupsen/logrus v1.6.0
github.com/smartystreets/assertions v1.0.1 // indirect
github.com/smartystreets/goconvey v1.6.4
github.com/spf13/pflag v1.0.5
github.com/testcontainers/testcontainers-go v0.8.0
@ -82,25 +98,30 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.1.0 // indirect
github.com/yudai/umutex v0.0.0-20150817080136-18216d265c6b
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/grpc v1.29.0
go.uber.org/atomic v1.7.0 // indirect
golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9
golang.org/x/net v0.0.0-20201224014010-6772e930b67b
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 // indirect
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a // indirect
golang.org/x/sys v0.0.0-20201223074533-0d417f636930
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324
golang.org/x/tools v0.0.0-20201228162255-34cd474b9958 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e // indirect
google.golang.org/grpc v1.33.2
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/src-d/go-git.v4 v4.13.1
gopkg.in/yaml.v2 v2.3.0
k8s.io/api v0.19.0
k8s.io/apiextensions-apiserver v0.19.0
k8s.io/apimachinery v0.19.0
k8s.io/apiserver v0.19.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.20.1
k8s.io/apiextensions-apiserver v0.20.1
k8s.io/apimachinery v0.20.1
k8s.io/apiserver v0.20.0
k8s.io/client-go v12.0.0+incompatible
k8s.io/klog/v2 v2.2.0 // indirect
k8s.io/kubernetes v1.19.0
k8s.io/component-base v0.20.1 // indirect
sigs.k8s.io/controller-runtime v0.6.1-0.20200831170621-ab55aa710b06 // indirect
)
// Pinned to kubernetes-1.16.2
// Pinned to kubernetes-1.20.0
replace (
github.com/coreos/etcd => github.com/coreos/etcd v3.2.31+incompatible
github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b
@ -110,17 +131,18 @@ replace (
github.com/godbus/dbus/v5 => github.com/godbus/dbus/v5 v5.0.3
github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.0
github.com/xeipuuv/gojsonschema => github.com/xeipuuv/gojsonschema v0.0.0-20160323030313-93e72a773fad
k8s.io/api => k8s.io/api v0.17.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.0
k8s.io/apimachinery => k8s.io/apimachinery v0.17.0
k8s.io/apiserver => k8s.io/apiserver v0.17.0
k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20191016114015-74ad18325ed5
k8s.io/client-go => k8s.io/client-go v0.17.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20191016115326-20453efc2458
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20191016115129-c07a134afb42
k8s.io/code-generator => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894
k8s.io/component-base => k8s.io/component-base v0.0.0-20191016111319-039242c015a9
k8s.io/cri-api => k8s.io/cri-api v0.0.0-20190828162817-608eb1dad4ac
google.golang.org/grpc => google.golang.org/grpc v1.29.0
k8s.io/api => k8s.io/api v0.20.0
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.0
k8s.io/apimachinery => k8s.io/apimachinery v0.20.0
k8s.io/apiserver => k8s.io/apiserver v0.20.0
k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.0
k8s.io/client-go => k8s.io/client-go v0.20.0
k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.0
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.0
k8s.io/code-generator => k8s.io/code-generator v0.20.0
k8s.io/component-base => k8s.io/component-base v0.20.0
k8s.io/cri-api => k8s.io/cri-api v0.20.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20191016115521-756ffa5af0bd
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20191016112429-9587704a8ad4
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20191016114939-2b2b218dc1df

633
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -25,17 +25,13 @@ import (
"os"
"os/exec"
"path"
"strings"
"time"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/parser/code"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/grctl/clients"
"github.com/goodrain/rainbond/util"
"github.com/goodrain/rainbond/util/termtables"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -45,32 +41,6 @@ import (
func NewSourceBuildCmd() cli.Command {
c := cli.Command{
Subcommands: []cli.Command{
cli.Command{
Name: "test",
Usage: "build test source code, If it can be build, you can build in rainbond",
Flags: []cli.Flag{
cli.StringFlag{
Name: "dir",
Usage: "source code dir,default is current dir.",
Value: "",
},
cli.StringFlag{
Name: "lang",
Usage: "source code lang type, if not specified, will automatic identify",
Value: "",
},
cli.StringFlag{
Name: "image",
Usage: "builder image name",
Value: builder.BUILDERIMAGENAME,
},
cli.StringSliceFlag{
Name: "env",
Usage: "Build the required environment variables",
},
},
Action: build,
},
cli.Command{
Name: "list",
Usage: "Lists the building tasks pod currently being performed",
@ -129,7 +99,7 @@ func NewSourceBuildCmd() cli.Command {
Action: func(ctx *cli.Context) {
Common(ctx)
namespace := ctx.String("namespace")
cms, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{
cms, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).List(context.Background(), metav1.ListOptions{
LabelSelector: "configtype=mavensetting",
})
if err != nil {
@ -168,7 +138,7 @@ func NewSourceBuildCmd() cli.Command {
showError("Please specify the task pod name")
}
namespace := ctx.String("namespace")
cm, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
cm, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
showError(err.Error())
}
@ -197,7 +167,7 @@ func NewSourceBuildCmd() cli.Command {
showError("Please specify the task pod name")
}
namespace := ctx.String("namespace")
cm, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
cm, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
showError(err.Error())
}
@ -213,7 +183,7 @@ func NewSourceBuildCmd() cli.Command {
}
cm.Data["mavensetting"] = string(body)
cm.Annotations["updateTime"] = time.Now().Format(time.RFC3339)
_, err = clients.K8SClient.CoreV1().ConfigMaps(namespace).Update(cm)
_, err = clients.K8SClient.CoreV1().ConfigMaps(namespace).Update(context.Background(), cm, metav1.UpdateOptions{})
if err != nil {
showError(err.Error())
}
@ -267,7 +237,7 @@ func NewSourceBuildCmd() cli.Command {
config.Data = map[string]string{
"mavensetting": string(body),
}
_, err = clients.K8SClient.CoreV1().ConfigMaps(namespace).Create(config)
_, err = clients.K8SClient.CoreV1().ConfigMaps(namespace).Create(context.Background(), config, metav1.CreateOptions{})
if err != nil {
showError(err.Error())
}
@ -291,7 +261,7 @@ func NewSourceBuildCmd() cli.Command {
showError("Please specify the task pod name")
}
namespace := ctx.String("namespace")
err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Delete(name, &metav1.DeleteOptions{})
err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Delete(context.Background(), name, metav1.DeleteOptions{})
if err != nil {
showError(err.Error())
}
@ -307,101 +277,6 @@ func NewSourceBuildCmd() cli.Command {
return c
}
func build(c *cli.Context) error {
dir := c.String("dir")
if dir == "" {
dir = util.GetCurrentDir()
}
fmt.Printf("Start test build code:%s \n", dir)
envs := c.StringSlice("env")
var kvenv []*sources.KeyValue
for _, e := range envs {
if strings.Contains(e, "=") {
info := strings.Split(e, "=")
kvenv = append(kvenv, &sources.KeyValue{Key: info[0], Value: info[1]})
}
}
lang := c.String("lang")
if lang == "" {
var err error
lang, err = getLang(dir)
if err != nil {
fatal("automatic identify failure."+err.Error(), 1)
}
}
prepare(dir)
kvenv = append(kvenv, &sources.KeyValue{Key: "LANGUAGE", Value: lang})
containerConfig := &sources.ContainerConfig{
Metadata: &sources.ContainerMetadata{
Name: "buildcontainer",
},
Image: &sources.ImageSpec{
Image: c.String("image"),
},
Mounts: []*sources.Mount{
&sources.Mount{
ContainerPath: "/tmp/cache",
HostPath: path.Join(dir, ".cache"),
Readonly: false,
},
&sources.Mount{
ContainerPath: "/tmp/slug",
HostPath: path.Join(dir, ".release"),
Readonly: false,
},
},
Envs: kvenv,
Stdin: true,
StdinOnce: true,
AttachStdin: true,
AttachStdout: true,
AttachStderr: true,
NetworkConfig: &sources.NetworkConfig{
NetworkMode: "host",
},
Args: []string{"local"},
}
reader, err := getSourceCodeTarFile(dir)
if err != nil {
fatal("tar code failure."+err.Error(), 1)
}
defer func() {
reader.Close()
clear()
}()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
containerService := sources.CreateDockerService(ctx, createDockerCli())
containerID, err := containerService.CreateContainer(containerConfig)
if err != nil {
return fmt.Errorf("create builder container error:%s", err.Error())
}
closed := make(chan struct{})
defer close(closed)
errchan := make(chan error, 1)
close, err := containerService.AttachContainer(containerID, true, true, true, reader, os.Stdout, os.Stderr, &errchan)
if err != nil {
containerService.RemoveContainer(containerID)
return fmt.Errorf("attach builder container error:%s", err.Error())
}
defer close()
statuschan := containerService.WaitExitOrRemoved(containerID, true)
//start the container
if err := containerService.StartContainer(containerID); err != nil {
containerService.RemoveContainer(containerID)
return fmt.Errorf("start builder container error:%s", err.Error())
}
if err := <-errchan; err != nil {
logrus.Debugf("Error hijack: %s", err)
}
status := <-statuschan
if status != 0 {
fatal("build source code error", 1)
}
fmt.Println("BUILD SUCCESS")
return nil
}
func getLang(dir string) (string, error) {
lang, err := code.GetLangType(dir)
if err != nil {

View File

@ -19,6 +19,7 @@
package cmd
import (
"context"
"fmt"
"os"
@ -47,7 +48,7 @@ func NewCmdConfig() cli.Command {
Action: func(c *cli.Context) {
Common(c)
namespace := c.String("namespace")
configMap, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Get("region-config", metav1.GetOptions{})
configMap, err := clients.K8SClient.CoreV1().ConfigMaps(namespace).Get(context.Background(), "region-config", metav1.GetOptions{})
if err != nil {
showError(err.Error())
}

View File

@ -1,6 +1,7 @@
package cmd
import (
"context"
"fmt"
"io/ioutil"
"os"
@ -52,7 +53,7 @@ func NewCmdInstall() cli.Command {
fmt.Println("Start install, please waiting!")
CommonWithoutRegion(c)
namespace := c.String("namespace")
apiClientSecrit, err := clients.K8SClient.CoreV1().Secrets(namespace).Get("rbd-api-client-cert", metav1.GetOptions{})
apiClientSecrit, err := clients.K8SClient.CoreV1().Secrets(namespace).Get(context.Background(), "rbd-api-client-cert", metav1.GetOptions{})
if err != nil {
showError(fmt.Sprintf("get region api tls secret failure %s", err.Error()))
}

View File

@ -19,6 +19,7 @@
package cmd
import (
"context"
"errors"
"fmt"
"net/url"
@ -376,7 +377,7 @@ func showServiceDeployInfo(c *cli.Context) error {
serviceTable.AddHeaders("Name", "IP", "Port")
for serviceID := range deployInfo.Services {
if clients.K8SClient != nil {
service, _ := clients.K8SClient.CoreV1().Services(tenantID).Get(serviceID, metav1.GetOptions{})
service, _ := clients.K8SClient.CoreV1().Services(tenantID).Get(context.Background(), serviceID, metav1.GetOptions{})
if service != nil {
var ports string
if service.Spec.Ports != nil && len(service.Spec.Ports) > 0 {
@ -398,7 +399,7 @@ func showServiceDeployInfo(c *cli.Context) error {
epTable.AddHeaders("Name", "IP", "Port", "Protocol")
for epname := range deployInfo.Endpoints {
if clients.K8SClient != nil {
ep, _ := clients.K8SClient.CoreV1().Endpoints(tenantID).Get(epname, metav1.GetOptions{})
ep, _ := clients.K8SClient.CoreV1().Endpoints(tenantID).Get(context.Background(), epname, metav1.GetOptions{})
if ep != nil {
for i := range ep.Subsets {
ss := &ep.Subsets[i]
@ -427,7 +428,7 @@ func showServiceDeployInfo(c *cli.Context) error {
ingressTable.AddHeaders("Name", "Host")
for ingressID := range deployInfo.Ingresses {
if clients.K8SClient != nil {
ingress, _ := clients.K8SClient.ExtensionsV1beta1().Ingresses(tenantID).Get(ingressID, metav1.GetOptions{})
ingress, _ := clients.K8SClient.ExtensionsV1beta1().Ingresses(tenantID).Get(context.Background(), ingressID, metav1.GetOptions{})
if ingress != nil {
for _, rule := range ingress.Spec.Rules {
ingressTable.AddRow(ingress.Name, rule.Host)
@ -444,7 +445,7 @@ func showServiceDeployInfo(c *cli.Context) error {
for podID := range deployInfo.Pods {
i++
if clients.K8SClient != nil {
pod, err := clients.K8SClient.CoreV1().Pods(tenantID).Get(podID, metav1.GetOptions{})
pod, err := clients.K8SClient.CoreV1().Pods(tenantID).Get(context.Background(), podID, metav1.GetOptions{})
if err != nil {
return err
}
@ -484,11 +485,11 @@ func showServiceDeployInfo(c *cli.Context) error {
}
claimName := vol.PersistentVolumeClaim.ClaimName
pvc, _ := clients.K8SClient.CoreV1().PersistentVolumeClaims(tenantID).Get(claimName, metav1.GetOptions{})
pvc, _ := clients.K8SClient.CoreV1().PersistentVolumeClaims(tenantID).Get(context.Background(), claimName, metav1.GetOptions{})
if pvc != nil {
pvn := pvc.Spec.VolumeName
volumeMount := name2Path[vol.Name]
pv, _ := clients.K8SClient.CoreV1().PersistentVolumes().Get(pvn, metav1.GetOptions{})
pv, _ := clients.K8SClient.CoreV1().PersistentVolumes().Get(context.Background(), pvn, metav1.GetOptions{})
if pv != nil {
switch {
case pv.Spec.HostPath != nil:

View File

@ -27,9 +27,9 @@ import (
"strings"
"time"
mv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
externalversions "github.com/coreos/prometheus-operator/pkg/client/informers/externalversions"
"github.com/coreos/prometheus-operator/pkg/client/versioned"
mv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
externalversions "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions"
"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
"github.com/prometheus/common/model"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@ -24,7 +24,7 @@ import (
"testing"
"time"
mv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
mv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
yaml "gopkg.in/yaml.v2"
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/util/workqueue"

View File

@ -19,13 +19,15 @@
package kubecache
import (
"context"
"fmt"
"github.com/eapache/channels"
"k8s.io/apimachinery/pkg/labels"
"math"
"strings"
"time"
"github.com/eapache/channels"
"k8s.io/apimachinery/pkg/labels"
"github.com/pquerna/ffjson/ffjson"
conf "github.com/goodrain/rainbond/cmd/node/option"
@ -138,7 +140,7 @@ func (k *kubeClient) GetNodeByName(nodename string) (*v1.Node, error) {
// drain:true can't scheduler ,false can scheduler
func (k *kubeClient) CordonOrUnCordon(nodeName string, drain bool) (*v1.Node, error) {
data := fmt.Sprintf(`{"spec":{"unschedulable":%t}}`, drain)
node, err := k.kubeclient.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, []byte(data))
node, err := k.kubeclient.CoreV1().Nodes().Patch(context.Background(), nodeName, types.StrategicMergePatchType, []byte(data), metav1.PatchOptions{})
if err != nil {
return node, err
}
@ -152,7 +154,7 @@ func (k *kubeClient) UpdateLabels(nodeName string, labels map[string]string) (*v
return nil, err
}
data := fmt.Sprintf(`{"metadata":{"labels":%s}}`, string(labelStr))
node, err := k.kubeclient.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, []byte(data))
node, err := k.kubeclient.CoreV1().Nodes().Patch(context.Background(), nodeName, types.StrategicMergePatchType, []byte(data), metav1.PatchOptions{})
if err != nil {
return node, err
}
@ -179,7 +181,7 @@ func (k *kubeClient) DeleteOrEvictPodsSimple(nodeName string) error {
return nil
}
func (k *kubeClient) GetPodsByNodes(nodeName string) (pods []v1.Pod, err error) {
podList, err := k.kubeclient.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
podList, err := k.kubeclient.CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{
FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
if err != nil {
return pods, err
@ -205,7 +207,7 @@ func (k *kubeClient) evictPod(pod v1.Pod, policyGroupVersion string) error {
DeleteOptions: deleteOptions,
}
// Remember to change change the URL manipulation func when Evction's version change
return k.kubeclient.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
return k.kubeclient.PolicyV1beta1().Evictions(eviction.Namespace).Evict(context.Background(), eviction)
}
// deleteOrEvictPods deletes or evicts the pods on the api server
@ -218,7 +220,7 @@ func (k *kubeClient) deleteOrEvictPods(pods []v1.Pod) error {
return err
}
getPodFn := func(namespace, name string) (*v1.Pod, error) {
return k.kubeclient.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
return k.kubeclient.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{})
}
return k.evictPods(pods, policyGroupVersion, getPodFn)
@ -271,7 +273,7 @@ func waitForDelete(pods []v1.Pod, interval, timeout time.Duration, usingEviction
return pods, err
}
func (k *kubeClient) deletePod(pod v1.Pod) error {
deleteOptions := &metav1.DeleteOptions{}
deleteOptions := metav1.DeleteOptions{}
//if GracePeriodSeconds >= 0 {
//if 1 >= 0 {
// //gracePeriodSeconds := int64(GracePeriodSeconds)
@ -280,7 +282,7 @@ func (k *kubeClient) deletePod(pod v1.Pod) error {
//}
gracePeriodSeconds := int64(1)
deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
return k.kubeclient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions)
return k.kubeclient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, deleteOptions)
}
func (k *kubeClient) evictPods(pods []v1.Pod, policyGroupVersion string, getPodFn func(namespace, name string) (*v1.Pod, error)) error {
@ -419,8 +421,8 @@ func (k *kubeClient) DownK8sNode(nodename string) error {
}
func (k *kubeClient) deleteNodeWithoutPods(name string) error {
opt := &metav1.DeleteOptions{}
err := k.kubeclient.CoreV1().Nodes().Delete(name, opt)
opt := metav1.DeleteOptions{}
err := k.kubeclient.CoreV1().Nodes().Delete(context.Background(), name, opt)
if err != nil {
return err
}
@ -454,7 +456,7 @@ func (k *kubeClient) UpK8sNode(rainbondNode *client.HostNode) (*v1.Node, error)
}
//set rainbond creator lable
node.Labels["creator"] = "Rainbond"
savedNode, err := k.kubeclient.CoreV1().Nodes().Create(node)
savedNode, err := k.kubeclient.CoreV1().Nodes().Create(context.Background(), node, metav1.CreateOptions{})
if err != nil {
return nil, err
}
@ -479,7 +481,7 @@ func (k *kubeClient) GetNodes() ([]*v1.Node, error) {
return nil, err
}
if len(nodes) == 0 {
list, err := k.kubeclient.CoreV1().Nodes().List(metav1.ListOptions{})
list, err := k.kubeclient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}

View File

@ -19,9 +19,11 @@
package node
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"context"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
@ -36,7 +38,7 @@ func TestCluster_handleNodeStatus(t *testing.T) {
t.Fatal(err)
}
node, err := cli.CoreV1().Nodes().Get("192.168.2.200", metav1.GetOptions{})
node, err := cli.CoreV1().Nodes().Get(context.Background(), "192.168.2.200", metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}

View File

@ -1,6 +1,7 @@
package docker
import (
"context"
"io"
"os"
"path"
@ -22,7 +23,7 @@ func SyncDockerCertFromSecret(clientset kubernetes.Interface, namespace, secretN
if namespace == "" || secretName == "" {
return nil
}
secretInfo, err := clientset.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{})
secretInfo, err := clientset.CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{})
if err != nil {
return err
}

View File

@ -1,6 +1,7 @@
package docker
import (
"context"
"testing"
corev1 "k8s.io/api/core/v1"
@ -46,11 +47,11 @@ Dv5SVos+Rd/zF9Szg68uBOzkrFODygyzUjPgUtP1oIrPMFgvraYmbBQNdzT/7zBN
OIBrj5fMeg27zqsV/2Qr1YuzfMZcgQG9KtPSe57RZH9kF7pCl+cqetc=
-----END CERTIFICATE-----`)
secret.Data = data
if _, err := cli.CoreV1().Secrets(namespace).Create(secret); err != nil {
if _, err := cli.CoreV1().Secrets(namespace).Create(context.Background(), secret, metav1.CreateOptions{}); err != nil {
t.Fatalf("create secret error: %s", err.Error())
}
if err := SyncDockerCertFromSecret(cli, namespace, secretName); err != nil {
t.Fatalf("sync secret error: %s", err.Error())
}
cli.CoreV1().Secrets(namespace).Delete(secretName, &metav1.DeleteOptions{})
cli.CoreV1().Secrets(namespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package k8s
import (
"context"
"fmt"
"os"
"strings"
@ -40,7 +41,7 @@ func ParseNameNS(input string) (string, string, error) {
// GetNodeIPOrName returns the IP address or the name of a node in the cluster
func GetNodeIPOrName(kubeClient clientset.Interface, name string, useInternalIP bool) string {
node, err := kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{})
node, err := kubeClient.CoreV1().Nodes().Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
glog.Errorf("Error getting node %v: %v", name, err)
return ""
@ -87,7 +88,7 @@ func GetPodDetails(kubeClient clientset.Interface) (*PodInfo, error) {
return nil, fmt.Errorf("unable to get POD information (missing POD_NAME or POD_NAMESPACE environment variable")
}
pod, _ := kubeClient.CoreV1().Pods(podNs).Get(podName, metav1.GetOptions{})
pod, _ := kubeClient.CoreV1().Pods(podNs).Get(context.Background(), podName, metav1.GetOptions{})
if pod == nil {
return nil, fmt.Errorf("unable to get POD information")
}

104
util/interrupt/interrupt.go Normal file
View File

@ -0,0 +1,104 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package interrupt
import (
"os"
"os/signal"
"sync"
"syscall"
)
// terminationSignals lists the OS signals that cause the program to exit on
// the supported platforms (linux, darwin, windows).
var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT}

// Handler guarantees execution of notifications after a critical section (the
// function passed to Run), even in the presence of process termination. The
// provided notify functions are invoked exactly once.
type Handler struct {
	// notify holds the cleanup callbacks to run when the critical section ends.
	notify []func()
	// final runs after the notify callbacks when an OS signal arrives; when it
	// is nil, Signal falls back to os.Exit(1).
	final func(os.Signal)
	// once ensures the callbacks (and final) execute at most one time.
	once sync.Once
}

// Chain creates a new handler that invokes all notify functions when the
// critical section exits and then invokes the optional handler's
// notifications. This allows critical sections to be nested without losing
// exactly once invocations. Notify functions can invoke any cleanup needed
// but should not exit (which is the responsibility of the parent handler).
func Chain(handler *Handler, notify ...func()) *Handler {
	if handler != nil {
		return New(handler.Signal, append(notify, handler.Close)...)
	}
	return New(nil, notify...)
}

// New creates a new handler that guarantees all notify functions are run
// after the critical section exits (or is interrupted by the OS), then
// invokes the final handler. If no final handler is specified, the default
// final is `os.Exit(1)`. A handler can only be used for one critical section.
func New(final func(os.Signal), notify ...func()) *Handler {
	h := Handler{
		notify: notify,
		final:  final,
	}
	return &h
}

// Close executes all the notification handlers if they have not yet been executed.
func (h *Handler) Close() {
	h.once.Do(func() {
		for i := range h.notify {
			h.notify[i]()
		}
	})
}

// Signal is called when an os.Signal is received, and guarantees that all
// notifications are executed, then the final handler is executed. This
// function should only be called once per Handler instance.
func (h *Handler) Signal(s os.Signal) {
	h.once.Do(func() {
		for i := range h.notify {
			h.notify[i]()
		}
		if h.final != nil {
			h.final(s)
			return
		}
		// No final handler was supplied: terminate the process.
		os.Exit(1)
	})
}

// Run ensures that any notifications are invoked after the provided fn exits
// (even if the process is interrupted by an OS termination signal).
// Notifications are only invoked once per Handler instance, so calling Run
// more than once will not behave as the user expects.
func (h *Handler) Run(fn func() error) error {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, terminationSignals...)
	defer func() {
		signal.Stop(sigCh)
		close(sigCh)
	}()
	go func() {
		// A receive with ok=false means the channel was closed on normal
		// exit; only a real signal triggers the notification path.
		if sig, ok := <-sigCh; ok {
			h.Signal(sig)
		}
	}()
	defer h.Close()
	return fn()
}

View File

@ -320,7 +320,7 @@ func SetConfigDefaults(config *rest.Config) error {
//GetContainerArgs get default container name
func (app *App) GetContainerArgs(namespace, podname, containerName string) (string, string, []string, error) {
var args = []string{"/bin/sh"}
pod, err := app.coreClient.CoreV1().Pods(namespace).Get(podname, metav1.GetOptions{})
pod, err := app.coreClient.CoreV1().Pods(namespace).Get(context.Background(), podname, metav1.GetOptions{})
if err != nil {
return "", "", args, err
}

View File

@ -22,7 +22,7 @@ import (
"github.com/docker/docker/pkg/term"
"k8s.io/kubernetes/pkg/util/interrupt"
"github.com/goodrain/rainbond/util/interrupt"
)
// SafeFunc is a function to be invoked by TTY.

View File

@ -19,6 +19,8 @@
package controller
import (
"context"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
@ -44,7 +46,7 @@ func (a *applyConfigController) Begin() {
for _, new := range newConfigMaps {
if nowConfig, ok := nowConfigMapMaps[new.Name]; ok {
new.UID = nowConfig.UID
newc, err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Update(new)
newc, err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Update(context.Background(), new, metav1.UpdateOptions{})
if err != nil {
logrus.Errorf("update config map failure %s", err.Error())
}
@ -52,7 +54,7 @@ func (a *applyConfigController) Begin() {
nowConfigMapMaps[new.Name] = nil
logrus.Debugf("update configmap %s for service %s", new.Name, a.appService.ServiceID)
} else {
newc, err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Create(new)
newc, err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Create(context.Background(), new, metav1.CreateOptions{})
if err != nil {
logrus.Errorf("update config map failure %s", err.Error())
}
@ -62,7 +64,7 @@ func (a *applyConfigController) Begin() {
}
for name, handle := range nowConfigMapMaps {
if handle != nil {
if err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Delete(name, &metav1.DeleteOptions{}); err != nil {
if err := a.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
logrus.Errorf("delete config map failure %s", err.Error())
}
logrus.Debugf("delete configmap %s for service %s", name, a.appService.ServiceID)

View File

@ -19,8 +19,11 @@
package controller
import (
"context"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
@ -29,14 +32,14 @@ func CreateKubeService(client kubernetes.Interface, namespace string, services .
var retryService []*corev1.Service
for i := range services {
createService := services[i]
if _, err := client.CoreV1().Services(namespace).Create(createService); err != nil {
if _, err := client.CoreV1().Services(namespace).Create(context.Background(), createService, metav1.CreateOptions{}); err != nil {
// Ignore if the Service is invalid with this error message:
// Service "kube-dns" is invalid: spec.clusterIP: Invalid value: "10.96.0.10": provided IP is already allocated
if !errors.IsAlreadyExists(err) && !errors.IsInvalid(err) {
retryService = append(retryService, createService)
continue
}
if _, err := client.CoreV1().Services(namespace).Update(createService); err != nil {
if _, err := client.CoreV1().Services(namespace).Update(context.Background(), createService, metav1.UpdateOptions{}); err != nil {
retryService = append(retryService, createService)
continue
}
@ -44,7 +47,7 @@ func CreateKubeService(client kubernetes.Interface, namespace string, services .
}
//second attempt
for _, service := range retryService {
_, err := client.CoreV1().Services(namespace).Create(service)
_, err := client.CoreV1().Services(namespace).Create(context.Background(), service, metav1.CreateOptions{})
if err != nil {
if errors.IsAlreadyExists(err) {
continue

View File

@ -19,6 +19,7 @@
package controller
import (
"context"
"sync"
"github.com/sirupsen/logrus"
@ -59,7 +60,7 @@ func (a *refreshXPAController) applyOne(clientset kubernetes.Interface, app *v1.
for _, hpa := range app.GetDelHPAs() {
logrus.Debugf("hpa name: %s; start deleting hpa.", hpa.GetName())
err := clientset.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Delete(hpa.GetName(), &metav1.DeleteOptions{})
err := clientset.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Delete(context.Background(), hpa.GetName(), metav1.DeleteOptions{})
if err != nil {
// don't return error, hope it is ok next time
logrus.Warningf("error deleting secret(%#v): %v", hpa, err)

View File

@ -19,6 +19,7 @@
package controller
import (
"context"
"fmt"
"math"
"sync"
@ -28,6 +29,7 @@ import (
"github.com/goodrain/rainbond/util"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
)
@ -69,14 +71,26 @@ func Replicas(n int) []byte {
func (s *scalingController) scalingOne(service v1.AppService) error {
if statefulset := service.GetStatefulSet(); statefulset != nil {
_, err := s.manager.client.AppsV1().StatefulSets(statefulset.Namespace).Patch(statefulset.Name, types.StrategicMergePatchType, Replicas(int(service.Replicas)))
_, err := s.manager.client.AppsV1().StatefulSets(statefulset.Namespace).Patch(
context.Background(),
statefulset.Name,
types.StrategicMergePatchType,
Replicas(int(service.Replicas)),
metav1.PatchOptions{},
)
if err != nil {
logrus.Error("patch statefulset info error.", err.Error())
return err
}
}
if deployment := service.GetDeployment(); deployment != nil {
_, err := s.manager.client.AppsV1().Deployments(deployment.Namespace).Patch(deployment.Name, types.StrategicMergePatchType, Replicas(int(service.Replicas)))
_, err := s.manager.client.AppsV1().Deployments(deployment.Namespace).Patch(
context.Background(),
deployment.Name,
types.StrategicMergePatchType,
Replicas(int(service.Replicas)),
metav1.PatchOptions{},
)
if err != nil {
logrus.Error("patch deployment info error.", err.Error())
return err

View File

@ -19,6 +19,7 @@
package controller
import (
"context"
"fmt"
"sync"
"time"
@ -91,10 +92,10 @@ func (s *startController) errorCallback(app v1.AppService) error {
}
func (s *startController) startOne(app v1.AppService) error {
//first: check and create namespace
_, err := s.manager.client.CoreV1().Namespaces().Get(app.TenantID, metav1.GetOptions{})
_, err := s.manager.client.CoreV1().Namespaces().Get(context.Background(), app.TenantID, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
_, err = s.manager.client.CoreV1().Namespaces().Create(app.GetTenant())
_, err = s.manager.client.CoreV1().Namespaces().Create(context.Background(), app.GetTenant(), metav1.CreateOptions{})
}
if err != nil {
return fmt.Errorf("create or check namespace failure %s", err.Error())
@ -103,7 +104,7 @@ func (s *startController) startOne(app v1.AppService) error {
//step 1: create configmap
if configs := app.GetConfigMaps(); configs != nil {
for _, config := range configs {
_, err := s.manager.client.CoreV1().ConfigMaps(app.TenantID).Create(config)
_, err := s.manager.client.CoreV1().ConfigMaps(app.TenantID).Create(context.Background(), config, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("create config map failure:%s", err.Error())
}
@ -112,20 +113,20 @@ func (s *startController) startOne(app v1.AppService) error {
// create claims
for _, claim := range app.GetClaimsManually() {
logrus.Debugf("create claim: %s", claim.Name)
_, err := s.manager.client.CoreV1().PersistentVolumeClaims(app.TenantID).Create(claim)
_, err := s.manager.client.CoreV1().PersistentVolumeClaims(app.TenantID).Create(context.Background(), claim, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("create claims: %v", err)
}
}
//step 2: create statefulset or deployment
if statefulset := app.GetStatefulSet(); statefulset != nil {
_, err = s.manager.client.AppsV1().StatefulSets(app.TenantID).Create(statefulset)
_, err = s.manager.client.AppsV1().StatefulSets(app.TenantID).Create(context.Background(), statefulset, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("create statefulset failure:%s", err.Error())
}
}
if deployment := app.GetDeployment(); deployment != nil {
_, err = s.manager.client.AppsV1().Deployments(app.TenantID).Create(deployment)
_, err = s.manager.client.AppsV1().Deployments(app.TenantID).Create(context.Background(), deployment, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("create deployment failure:%s;", err.Error())
}
@ -140,7 +141,7 @@ func (s *startController) startOne(app v1.AppService) error {
if secrets := append(app.GetSecrets(true), app.GetEnvVarSecrets(true)...); secrets != nil {
for _, secret := range secrets {
if len(secret.ResourceVersion) == 0 {
_, err := s.manager.client.CoreV1().Secrets(app.TenantID).Create(secret)
_, err := s.manager.client.CoreV1().Secrets(app.TenantID).Create(context.Background(), secret, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("create secret failure:%s", err.Error())
}
@ -151,7 +152,7 @@ func (s *startController) startOne(app v1.AppService) error {
if ingresses := app.GetIngress(true); ingresses != nil {
for _, ingress := range ingresses {
if len(ingress.ResourceVersion) == 0 {
_, err := s.manager.client.ExtensionsV1beta1().Ingresses(app.TenantID).Create(ingress)
_, err := s.manager.client.ExtensionsV1beta1().Ingresses(app.TenantID).Create(context.Background(), ingress, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("create ingress failure:%s", err.Error())
}
@ -162,7 +163,7 @@ func (s *startController) startOne(app v1.AppService) error {
if hpas := app.GetHPAs(); len(hpas) != 0 {
for _, hpa := range hpas {
if len(hpa.ResourceVersion) == 0 {
_, err := s.manager.client.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Create(hpa)
_, err := s.manager.client.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Create(context.Background(), hpa, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
logrus.Debugf("hpa: %#v", hpa)
return fmt.Errorf("create hpa: %v", err)
@ -181,7 +182,7 @@ func (s *startController) startOne(app v1.AppService) error {
if smClient != nil {
for _, sm := range sms {
if len(sm.ResourceVersion) == 0 {
_, err := smClient.MonitoringV1().ServiceMonitors(sm.GetNamespace()).Create(sm)
_, err := smClient.MonitoringV1().ServiceMonitors(sm.GetNamespace()).Create(context.Background(), sm, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
logrus.Errorf("create service monitor failure: %s", err.Error())
}

View File

@ -19,6 +19,7 @@
package controller
import (
"context"
"fmt"
"sync"
"time"
@ -68,7 +69,7 @@ func (s *stopController) stopOne(app v1.AppService) error {
if services := app.GetServices(true); services != nil {
for _, service := range services {
if service != nil && service.Name != "" {
err := s.manager.client.CoreV1().Services(app.TenantID).Delete(service.Name, &metav1.DeleteOptions{
err := s.manager.client.CoreV1().Services(app.TenantID).Delete(context.Background(), service.Name, metav1.DeleteOptions{
GracePeriodSeconds: &zero,
})
if err != nil && !errors.IsNotFound(err) {
@ -81,7 +82,7 @@ func (s *stopController) stopOne(app v1.AppService) error {
if secrets := app.GetSecrets(true); secrets != nil {
for _, secret := range secrets {
if secret != nil && secret.Name != "" {
err := s.manager.client.CoreV1().Secrets(app.TenantID).Delete(secret.Name, &metav1.DeleteOptions{
err := s.manager.client.CoreV1().Secrets(app.TenantID).Delete(context.Background(), secret.Name, metav1.DeleteOptions{
GracePeriodSeconds: &zero,
})
if err != nil && !errors.IsNotFound(err) {
@ -94,7 +95,7 @@ func (s *stopController) stopOne(app v1.AppService) error {
if ingresses := app.GetIngress(true); ingresses != nil {
for _, ingress := range ingresses {
if ingress != nil && ingress.Name != "" {
err := s.manager.client.ExtensionsV1beta1().Ingresses(app.TenantID).Delete(ingress.Name, &metav1.DeleteOptions{
err := s.manager.client.ExtensionsV1beta1().Ingresses(app.TenantID).Delete(context.Background(), ingress.Name, metav1.DeleteOptions{
GracePeriodSeconds: &zero,
})
if err != nil && !errors.IsNotFound(err) {
@ -107,7 +108,7 @@ func (s *stopController) stopOne(app v1.AppService) error {
if configs := app.GetConfigMaps(); configs != nil {
for _, config := range configs {
if config != nil && config.Name != "" {
err := s.manager.client.CoreV1().ConfigMaps(app.TenantID).Delete(config.Name, &metav1.DeleteOptions{
err := s.manager.client.CoreV1().ConfigMaps(app.TenantID).Delete(context.Background(), config.Name, metav1.DeleteOptions{
GracePeriodSeconds: &zero,
})
if err != nil && !errors.IsNotFound(err) {
@ -118,14 +119,14 @@ func (s *stopController) stopOne(app v1.AppService) error {
}
//step 5: delete statefulset or deployment
if statefulset := app.GetStatefulSet(); statefulset != nil {
err := s.manager.client.AppsV1().StatefulSets(app.TenantID).Delete(statefulset.Name, &metav1.DeleteOptions{})
err := s.manager.client.AppsV1().StatefulSets(app.TenantID).Delete(context.Background(), statefulset.Name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return fmt.Errorf("delete statefulset failure:%s", err.Error())
}
s.manager.store.OnDeletes(statefulset)
}
if deployment := app.GetDeployment(); deployment != nil && deployment.Name != "" {
err := s.manager.client.AppsV1().Deployments(app.TenantID).Delete(deployment.Name, &metav1.DeleteOptions{})
err := s.manager.client.AppsV1().Deployments(app.TenantID).Delete(context.Background(), deployment.Name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return fmt.Errorf("delete deployment failure:%s", err.Error())
}
@ -136,7 +137,7 @@ func (s *stopController) stopOne(app v1.AppService) error {
if pods := app.GetPods(true); pods != nil {
for _, pod := range pods {
if pod != nil && pod.Name != "" {
err := s.manager.client.CoreV1().Pods(app.TenantID).Delete(pod.Name, &metav1.DeleteOptions{
err := s.manager.client.CoreV1().Pods(app.TenantID).Delete(context.Background(), pod.Name, metav1.DeleteOptions{
GracePeriodSeconds: &gracePeriodSeconds,
})
if err != nil && !errors.IsNotFound(err) {
@ -148,7 +149,7 @@ func (s *stopController) stopOne(app v1.AppService) error {
//step 7: delete all hpa
if hpas := app.GetHPAs(); len(hpas) != 0 {
for _, hpa := range hpas {
err := s.manager.client.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Delete(hpa.GetName(), &metav1.DeleteOptions{})
err := s.manager.client.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Delete(context.Background(), hpa.GetName(), metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return fmt.Errorf("delete hpa: %v", err)
}
@ -164,7 +165,7 @@ func (s *stopController) stopOne(app v1.AppService) error {
}
if smClient != nil {
for _, sm := range sms {
err := smClient.MonitoringV1().ServiceMonitors(sm.GetNamespace()).Delete(sm.GetName(), &metav1.DeleteOptions{})
err := smClient.MonitoringV1().ServiceMonitors(sm.GetNamespace()).Delete(context.Background(), sm.GetName(), metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
logrus.Errorf("delete service monitor failure: %s", err.Error())
}

View File

@ -19,6 +19,7 @@
package controller
import (
"context"
"fmt"
"sync"
"time"
@ -78,7 +79,7 @@ func (s *upgradeController) upgradeConfigMap(newapp v1.AppService) {
for _, new := range newConfigMaps {
if nowConfig, ok := nowConfigMapMaps[new.Name]; ok {
new.UID = nowConfig.UID
newc, err := s.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Update(new)
newc, err := s.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Update(context.Background(), new, metav1.UpdateOptions{})
if err != nil {
logrus.Errorf("update config map failure %s", err.Error())
}
@ -86,7 +87,7 @@ func (s *upgradeController) upgradeConfigMap(newapp v1.AppService) {
nowConfigMapMaps[new.Name] = nil
logrus.Debugf("update configmap %s for service %s", new.Name, newapp.ServiceID)
} else {
newc, err := s.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Create(new)
newc, err := s.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Create(context.Background(), new, metav1.CreateOptions{})
if err != nil {
logrus.Errorf("update config map failure %s", err.Error())
}
@ -96,7 +97,7 @@ func (s *upgradeController) upgradeConfigMap(newapp v1.AppService) {
}
for name, handle := range nowConfigMapMaps {
if handle != nil {
if err := s.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Delete(name, &metav1.DeleteOptions{}); err != nil {
if err := s.manager.client.CoreV1().ConfigMaps(nowApp.TenantID).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
logrus.Errorf("delete config map failure %s", err.Error())
}
logrus.Debugf("delete configmap %s for service %s", name, newapp.ServiceID)
@ -118,7 +119,7 @@ func (s *upgradeController) upgradeService(newapp v1.AppService) {
nowConfig.Spec.Ports = new.Spec.Ports
nowConfig.Spec.Type = new.Spec.Type
nowConfig.Labels = new.Labels
newc, err := s.manager.client.CoreV1().Services(nowApp.TenantID).Update(nowConfig)
newc, err := s.manager.client.CoreV1().Services(nowApp.TenantID).Update(context.Background(), nowConfig, metav1.UpdateOptions{})
if err != nil {
logrus.Errorf("update service failure %s", err.Error())
}
@ -136,7 +137,7 @@ func (s *upgradeController) upgradeService(newapp v1.AppService) {
}
for name, handle := range nowServiceMaps {
if handle != nil {
if err := s.manager.client.CoreV1().Services(nowApp.TenantID).Delete(name, &metav1.DeleteOptions{}); err != nil {
if err := s.manager.client.CoreV1().Services(nowApp.TenantID).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
logrus.Errorf("delete service failure %s", err.Error())
}
logrus.Debugf("delete service %s for service %s", name, newapp.ServiceID)
@ -155,7 +156,7 @@ func (s *upgradeController) upgradeClaim(newapp v1.AppService) {
if o, ok := nowClaimMaps[n.Name]; ok {
n.UID = o.UID
n.ResourceVersion = o.ResourceVersion
claim, err := s.manager.client.CoreV1().PersistentVolumeClaims(n.Namespace).Update(n)
claim, err := s.manager.client.CoreV1().PersistentVolumeClaims(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
logrus.Errorf("update claim[%s] error: %s", n.GetName(), err.Error())
continue
@ -164,7 +165,7 @@ func (s *upgradeController) upgradeClaim(newapp v1.AppService) {
delete(nowClaimMaps, o.Name)
logrus.Debugf("ServiceID: %s; successfully update claim: %s", nowApp.ServiceID, n.Name)
} else {
claim, err := s.manager.client.CoreV1().PersistentVolumeClaims(n.Namespace).Create(n)
claim, err := s.manager.client.CoreV1().PersistentVolumeClaims(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
logrus.Errorf("error create claim: %+v: err: %v", claim.GetName(), err)
continue
@ -177,10 +178,10 @@ func (s *upgradeController) upgradeClaim(newapp v1.AppService) {
func (s *upgradeController) upgradeOne(app v1.AppService) error {
//first: check and create namespace
_, err := s.manager.client.CoreV1().Namespaces().Get(app.TenantID, metav1.GetOptions{})
_, err := s.manager.client.CoreV1().Namespaces().Get(context.Background(), app.TenantID, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
_, err = s.manager.client.CoreV1().Namespaces().Create(app.GetTenant())
_, err = s.manager.client.CoreV1().Namespaces().Create(context.Background(), app.GetTenant(), metav1.CreateOptions{})
}
if err != nil {
return fmt.Errorf("create or check namespace failure %s", err.Error())
@ -188,7 +189,7 @@ func (s *upgradeController) upgradeOne(app v1.AppService) error {
}
s.upgradeConfigMap(app)
if deployment := app.GetDeployment(); deployment != nil {
_, err = s.manager.client.AppsV1().Deployments(deployment.Namespace).Patch(deployment.Name, types.MergePatchType, app.UpgradePatch["deployment"])
_, err = s.manager.client.AppsV1().Deployments(deployment.Namespace).Patch(context.Background(), deployment.Name, types.MergePatchType, app.UpgradePatch["deployment"], metav1.PatchOptions{})
if err != nil {
app.Logger.Error(fmt.Sprintf("upgrade deployment %s failure %s", app.ServiceAlias, err.Error()), event.GetLoggerOption("failure"))
return fmt.Errorf("upgrade deployment %s failure %s", app.ServiceAlias, err.Error())
@ -198,14 +199,14 @@ func (s *upgradeController) upgradeOne(app v1.AppService) error {
// create claims
for _, claim := range app.GetClaimsManually() {
logrus.Debugf("create claim: %s", claim.Name)
_, err := s.manager.client.CoreV1().PersistentVolumeClaims(app.TenantID).Create(claim)
_, err := s.manager.client.CoreV1().PersistentVolumeClaims(app.TenantID).Create(context.Background(), claim, metav1.CreateOptions{})
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("create claims: %v", err)
}
}
if statefulset := app.GetStatefulSet(); statefulset != nil {
_, err = s.manager.client.AppsV1().StatefulSets(statefulset.Namespace).Patch(statefulset.Name, types.MergePatchType, app.UpgradePatch["statefulset"])
_, err = s.manager.client.AppsV1().StatefulSets(statefulset.Namespace).Patch(context.Background(), statefulset.Name, types.MergePatchType, app.UpgradePatch["statefulset"], metav1.PatchOptions{})
if err != nil {
logrus.Errorf("patch statefulset error : %s", err.Error())
app.Logger.Error(fmt.Sprintf("upgrade statefulset %s failure %s", app.ServiceAlias, err.Error()), event.GetLoggerOption("failure"))

View File

@ -1,10 +1,12 @@
package conversion
import (
"context"
"testing"
"github.com/goodrain/rainbond/db/model"
k8sutil "github.com/goodrain/rainbond/util/k8s"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestCreateMetricSpec(t *testing.T) {
@ -51,7 +53,7 @@ func TestNewHPA(t *testing.T) {
t.Fatalf("error creating k8s clientset: %s", err.Error())
}
_, err = clientset.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Create(hpa)
_, err = clientset.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Create(context.Background(), hpa, metav1.CreateOptions{})
if err != nil {
t.Fatalf("create hpa: %v", err)
}

View File

@ -19,6 +19,7 @@
package conversion
import (
"context"
"fmt"
"strconv"
"testing"
@ -242,17 +243,17 @@ func TestApplyTcpRule(t *testing.T) {
if err != nil {
t.Fatalf("create kube api client error: %v", err)
}
if _, err := clientSet.CoreV1().Namespaces().Create(&corev1.Namespace{
if _, err := clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testCase["namespace"],
},
}); err != nil {
}, metav1.CreateOptions{}); err != nil {
t.Errorf("Can't create Namespace(%s): %v", testCase["namespace"], err)
}
if _, err := clientSet.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(ing); err != nil {
if _, err := clientSet.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(context.Background(), ing, metav1.CreateOptions{}); err != nil {
t.Errorf("Can't create Ingress(%s): %v", ing.Name, err)
}
if err := clientSet.CoreV1().Namespaces().Delete(testCase["namespace"], &metav1.DeleteOptions{}); err != nil {
if err := clientSet.CoreV1().Namespaces().Delete(context.Background(), testCase["namespace"], metav1.DeleteOptions{}); err != nil {
t.Errorf("Can't delete namespace(%s)", testCase["namespace"])
}
}
@ -380,17 +381,17 @@ func TestAppServiceBuild_ApplyHttpRule(t *testing.T) {
if err != nil {
t.Fatalf("create kube api client error: %v", err)
}
if _, err := clientSet.CoreV1().Namespaces().Create(&corev1.Namespace{
if _, err := clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testCase["namespace"],
},
}); err != nil {
}, metav1.CreateOptions{}); err != nil {
t.Errorf("Can't create Namespace(%s): %v", testCase["namespace"], err)
}
if _, err := clientSet.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(ing); err != nil {
if _, err := clientSet.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(context.Background(), ing, metav1.CreateOptions{}); err != nil {
t.Errorf("Can't create Ingress(%s): %v", ing.Name, err)
}
if err := clientSet.CoreV1().Namespaces().Delete(testCase["namespace"], &metav1.DeleteOptions{}); err != nil {
if err := clientSet.CoreV1().Namespaces().Delete(context.Background(), testCase["namespace"], metav1.DeleteOptions{}); err != nil {
t.Errorf("Can't delete namespace(%s)", testCase["namespace"])
}
}
@ -511,20 +512,20 @@ func TestAppServiceBuild_ApplyHttpRuleWithCertificate(t *testing.T) {
if err != nil {
t.Fatalf("create kube api client error: %v", err)
}
if _, err := clientSet.CoreV1().Namespaces().Create(&corev1.Namespace{
if _, err := clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testCase["namespace"],
},
}); err != nil {
}, metav1.CreateOptions{}); err != nil {
t.Errorf("Can't create Serect(%s): %v", sec.Name, err)
}
if _, err := clientSet.CoreV1().Secrets(sec.Namespace).Create(sec); err != nil {
if _, err := clientSet.CoreV1().Secrets(sec.Namespace).Create(context.Background(), sec, metav1.CreateOptions{}); err != nil {
t.Errorf("Can't create Serect(%s): %v", sec.Name, err)
}
if _, err := clientSet.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(ing); err != nil {
if _, err := clientSet.ExtensionsV1beta1().Ingresses(ing.Namespace).Create(context.Background(), ing, metav1.CreateOptions{}); err != nil {
t.Errorf("Can't create Ingress(%s): %v", ing.Name, err)
}
if err := clientSet.CoreV1().Namespaces().Delete(testCase["namespace"], &metav1.DeleteOptions{}); err != nil {
if err := clientSet.CoreV1().Namespaces().Delete(context.Background(), testCase["namespace"], metav1.DeleteOptions{}); err != nil {
t.Errorf("Can't delete namespace(%s)", testCase["namespace"])
}

View File

@ -3,10 +3,10 @@ package conversion
import (
"time"
mv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/goodrain/rainbond/db"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"github.com/jinzhu/gorm"
mv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@ -19,13 +19,14 @@
package f
import (
"context"
"fmt"
"time"
monitorv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/coreos/prometheus-operator/pkg/client/versioned"
"github.com/goodrain/rainbond/gateway/annotations/parser"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
"github.com/sirupsen/logrus"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
corev1 "k8s.io/api/core/v1"
@ -44,10 +45,10 @@ const (
// ApplyOne applies one rule.
func ApplyOne(clientset kubernetes.Interface, app *v1.AppService) error {
_, err := clientset.CoreV1().Namespaces().Get(app.TenantID, metav1.GetOptions{})
_, err := clientset.CoreV1().Namespaces().Get(context.Background(), app.TenantID, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err = clientset.CoreV1().Namespaces().Create(app.GetTenant())
_, err = clientset.CoreV1().Namespaces().Create(context.Background(), app.GetTenant(), metav1.CreateOptions{})
if err != nil && !k8sErrors.IsAlreadyExists(err) {
return fmt.Errorf("error creating namespace: %v", err)
}
@ -105,7 +106,7 @@ func ApplyOne(clientset kubernetes.Interface, app *v1.AppService) error {
}
// delete delIngress
for _, ing := range app.GetDelIngs() {
err := clientset.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, &metav1.DeleteOptions{})
err := clientset.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(context.Background(), ing.Name, metav1.DeleteOptions{})
if err != nil && !k8sErrors.IsNotFound(err) {
// don't return error, hope it is ok next time
logrus.Warningf("error deleting ingress(%v): %v", ing, err)
@ -113,7 +114,7 @@ func ApplyOne(clientset kubernetes.Interface, app *v1.AppService) error {
}
// delete delSecrets
for _, secret := range app.GetDelSecrets() {
err := clientset.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{})
err := clientset.CoreV1().Secrets(secret.Namespace).Delete(context.Background(), secret.Name, metav1.DeleteOptions{})
if err != nil && !k8sErrors.IsNotFound(err) {
// don't return error, hope it is ok next time
logrus.Warningf("error deleting secret(%v): %v", secret, err)
@ -121,7 +122,7 @@ func ApplyOne(clientset kubernetes.Interface, app *v1.AppService) error {
}
// delete delServices
for _, svc := range app.GetDelServices() {
err := clientset.CoreV1().Services(svc.Namespace).Delete(svc.Name, &metav1.DeleteOptions{})
err := clientset.CoreV1().Services(svc.Namespace).Delete(context.Background(), svc.Name, metav1.DeleteOptions{})
if err != nil && !k8sErrors.IsNotFound(err) {
// don't return error, hope it is ok next time
logrus.Warningf("error deleting service(%v): %v", svc, err)
@ -133,10 +134,10 @@ func ApplyOne(clientset kubernetes.Interface, app *v1.AppService) error {
}
func ensureService(new *corev1.Service, clientSet kubernetes.Interface) error {
old, err := clientSet.CoreV1().Services(new.Namespace).Get(new.Name, metav1.GetOptions{})
old, err := clientSet.CoreV1().Services(new.Namespace).Get(context.Background(), new.Name, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err = clientSet.CoreV1().Services(new.Namespace).Create(new)
_, err = clientSet.CoreV1().Services(new.Namespace).Create(context.Background(), new, metav1.CreateOptions{})
if err != nil && !k8sErrors.IsAlreadyExists(err) {
logrus.Warningf("error creating service %+v: %v", new, err)
}
@ -155,7 +156,7 @@ func ensureService(new *corev1.Service, clientSet kubernetes.Interface) error {
func persistUpdate(service *corev1.Service, clientSet kubernetes.Interface) error {
var err error
for i := 0; i < clientRetryCount; i++ {
_, err = clientSet.CoreV1().Services(service.Namespace).UpdateStatus(service)
_, err = clientSet.CoreV1().Services(service.Namespace).UpdateStatus(context.Background(), service, metav1.UpdateOptions{})
if err == nil {
return nil
}
@ -180,10 +181,10 @@ func persistUpdate(service *corev1.Service, clientSet kubernetes.Interface) erro
}
func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface) {
_, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(ingress)
_, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Update(context.Background(), ingress, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(ingress)
_, err := clientSet.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.Background(), ingress, metav1.CreateOptions{})
if err != nil && !k8sErrors.IsAlreadyExists(err) {
logrus.Errorf("error creating ingress %+v: %v", ingress, err)
}
@ -194,11 +195,11 @@ func ensureIngress(ingress *extensions.Ingress, clientSet kubernetes.Interface)
}
func ensureSecret(secret *corev1.Secret, clientSet kubernetes.Interface) {
_, err := clientSet.CoreV1().Secrets(secret.Namespace).Update(secret)
_, err := clientSet.CoreV1().Secrets(secret.Namespace).Update(context.Background(), secret, metav1.UpdateOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err := clientSet.CoreV1().Secrets(secret.Namespace).Create(secret)
_, err := clientSet.CoreV1().Secrets(secret.Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
if err != nil && !k8sErrors.IsAlreadyExists(err) {
logrus.Warningf("error creating secret %+v: %v", secret, err)
}
@ -211,7 +212,7 @@ func ensureSecret(secret *corev1.Secret, clientSet kubernetes.Interface) {
// EnsureEndpoints creates or updates endpoints.
func EnsureEndpoints(ep *corev1.Endpoints, clientSet kubernetes.Interface) error {
// See if there's actually an update here.
currentEndpoints, err := clientSet.CoreV1().Endpoints(ep.Namespace).Get(ep.Name, metav1.GetOptions{})
currentEndpoints, err := clientSet.CoreV1().Endpoints(ep.Namespace).Get(context.Background(), ep.Name, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
currentEndpoints = &corev1.Endpoints{
@ -241,11 +242,11 @@ func EnsureEndpoints(ep *corev1.Endpoints, clientSet kubernetes.Interface) error
}
if createEndpoints {
// No previous endpoints, create them
_, err = clientSet.CoreV1().Endpoints(ep.Namespace).Create(newEndpoints)
_, err = clientSet.CoreV1().Endpoints(ep.Namespace).Create(context.Background(), newEndpoints, metav1.CreateOptions{})
logrus.Infof("Create endpoints for %v/%v", ep.Namespace, ep.Name)
} else {
// Pre-existing
_, err = clientSet.CoreV1().Endpoints(ep.Namespace).Update(newEndpoints)
_, err = clientSet.CoreV1().Endpoints(ep.Namespace).Update(context.Background(), newEndpoints, metav1.UpdateOptions{})
logrus.Infof("Update endpoints for %v/%v", ep.Namespace, ep.Name)
}
if err != nil {
@ -268,10 +269,10 @@ func EnsureService(new *corev1.Service, clientSet kubernetes.Interface) error {
// EnsureHPA -
func EnsureHPA(new *autoscalingv2.HorizontalPodAutoscaler, clientSet kubernetes.Interface) {
_, err := clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(new.Namespace).Get(new.Name, metav1.GetOptions{})
_, err := clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(new.Namespace).Get(context.Background(), new.Name, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err = clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(new.Namespace).Create(new)
_, err = clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(new.Namespace).Create(context.Background(), new, metav1.CreateOptions{})
if err != nil {
logrus.Warningf("error creating hpa %+v: %v", new, err)
}
@ -280,7 +281,7 @@ func EnsureHPA(new *autoscalingv2.HorizontalPodAutoscaler, clientSet kubernetes.
logrus.Errorf("error getting hpa(%s): %v", fmt.Sprintf("%s/%s", new.Namespace, new.Name), err)
return
}
_, err = clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(new.Namespace).Update(new)
_, err = clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(new.Namespace).Update(context.Background(), new, metav1.UpdateOptions{})
if err != nil {
logrus.Warningf("error updating hpa %+v: %v", new, err)
return
@ -300,7 +301,7 @@ func UpgradeIngress(clientset kubernetes.Interface,
if o, ok := oldMap[n.Name]; ok {
n.UID = o.UID
n.ResourceVersion = o.ResourceVersion
ing, err := clientset.ExtensionsV1beta1().Ingresses(n.Namespace).Update(n)
ing, err := clientset.ExtensionsV1beta1().Ingresses(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error updating ingress: %+v: err: %v",
ing, err), err); err != nil {
@ -313,7 +314,7 @@ func UpgradeIngress(clientset kubernetes.Interface,
logrus.Debugf("ServiceID: %s; successfully update ingress: %s", as.ServiceID, ing.Name)
} else {
logrus.Debugf("ingress: %+v", n)
ing, err := clientset.ExtensionsV1beta1().Ingresses(n.Namespace).Create(n)
ing, err := clientset.ExtensionsV1beta1().Ingresses(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error creating ingress: %+v: err: %v",
ing, err), err); err != nil {
@ -327,8 +328,8 @@ func UpgradeIngress(clientset kubernetes.Interface,
}
for _, ing := range oldMap {
if ing != nil {
if err := clientset.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name,
&metav1.DeleteOptions{}); err != nil {
if err := clientset.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(context.Background(), ing.Name,
metav1.DeleteOptions{}); err != nil {
if err := handleErr(fmt.Sprintf("error deleting ingress: %+v: err: %v",
ing, err), err); err != nil {
return err
@ -353,7 +354,7 @@ func UpgradeSecrets(clientset kubernetes.Interface,
if o, ok := oldMap[n.Name]; ok {
n.UID = o.UID
n.ResourceVersion = o.ResourceVersion
sec, err := clientset.CoreV1().Secrets(n.Namespace).Update(n)
sec, err := clientset.CoreV1().Secrets(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error updating secret: %+v: err: %v",
sec, err), err); err != nil {
@ -365,7 +366,7 @@ func UpgradeSecrets(clientset kubernetes.Interface,
delete(oldMap, o.Name)
logrus.Debugf("ServiceID: %s; successfully update secret: %s", as.ServiceID, sec.Name)
} else {
sec, err := clientset.CoreV1().Secrets(n.Namespace).Create(n)
sec, err := clientset.CoreV1().Secrets(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error creating secret: %+v: err: %v",
sec, err), err); err != nil {
@ -379,7 +380,7 @@ func UpgradeSecrets(clientset kubernetes.Interface,
}
for _, sec := range oldMap {
if sec != nil {
if err := clientset.CoreV1().Secrets(sec.Namespace).Delete(sec.Name, &metav1.DeleteOptions{}); err != nil {
if err := clientset.CoreV1().Secrets(sec.Namespace).Delete(context.Background(), sec.Name, metav1.DeleteOptions{}); err != nil {
if err := handleErr(fmt.Sprintf("error deleting secret: %+v: err: %v",
sec, err), err); err != nil {
return err
@ -402,7 +403,7 @@ func UpgradeClaims(clientset *kubernetes.Clientset, as *v1.AppService, old, new
if o, ok := oldMap[n.Name]; ok {
n.UID = o.UID
n.ResourceVersion = o.ResourceVersion
claim, err := clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Update(n)
claim, err := clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error updating claim: %+v: err: %v", claim, err), err); err != nil {
return err
@ -413,10 +414,10 @@ func UpgradeClaims(clientset *kubernetes.Clientset, as *v1.AppService, old, new
delete(oldMap, o.Name)
logrus.Debugf("ServiceID: %s; successfully update claim: %s", as.ServiceID, claim.Name)
} else {
claim, err := clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Get(n.Name, metav1.GetOptions{})
claim, err := clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Get(context.Background(), n.Name, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err := clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Create(n)
_, err := clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error creating claim: %+v: err: %v",
n, err), err); err != nil {
@ -433,7 +434,7 @@ func UpgradeClaims(clientset *kubernetes.Clientset, as *v1.AppService, old, new
if claim != nil {
logrus.Infof("claim is exists, do not create again, and can't update it: %s", claim.Name)
} else {
claim, err = clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Update(n)
claim, err = clientset.CoreV1().PersistentVolumeClaims(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error update claim: %+v: err: %v", claim, err), err); err != nil {
return err
@ -447,7 +448,7 @@ func UpgradeClaims(clientset *kubernetes.Clientset, as *v1.AppService, old, new
}
for _, claim := range oldMap {
if claim != nil {
if err := clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{}); err != nil {
if err := clientset.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.Background(), claim.Name, metav1.DeleteOptions{}); err != nil {
if err := handleErr(fmt.Sprintf("error deleting claim: %+v: err: %v", claim, err), err); err != nil {
return err
}
@ -469,10 +470,10 @@ func UpgradeEndpoints(clientset kubernetes.Interface,
}
for _, n := range new {
if o, ok := oldMap[n.Name]; ok {
oldEndpoint, err := clientset.CoreV1().Endpoints(n.Namespace).Get(n.Name, metav1.GetOptions{})
oldEndpoint, err := clientset.CoreV1().Endpoints(n.Namespace).Get(context.Background(), n.Name, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
_, err := clientset.CoreV1().Endpoints(n.Namespace).Create(n)
_, err := clientset.CoreV1().Endpoints(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error creating endpoints: %+v: err: %v",
n, err), err); err != nil {
@ -486,7 +487,7 @@ func UpgradeEndpoints(clientset kubernetes.Interface,
}
}
n.ResourceVersion = oldEndpoint.ResourceVersion
ep, err := clientset.CoreV1().Endpoints(n.Namespace).Update(n)
ep, err := clientset.CoreV1().Endpoints(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
if e := handleErr(fmt.Sprintf("error updating endpoints: %+v: err: %v",
ep, err), err); e != nil {
@ -498,7 +499,7 @@ func UpgradeEndpoints(clientset kubernetes.Interface,
delete(oldMap, o.Name)
logrus.Debugf("ServiceID: %s; successfully update endpoints: %s", as.ServiceID, ep.Name)
} else {
_, err := clientset.CoreV1().Endpoints(n.Namespace).Create(n)
_, err := clientset.CoreV1().Endpoints(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error creating endpoints: %+v: err: %v",
n, err), err); err != nil {
@ -512,7 +513,7 @@ func UpgradeEndpoints(clientset kubernetes.Interface,
}
for _, sec := range oldMap {
if sec != nil {
if err := clientset.CoreV1().Endpoints(sec.Namespace).Delete(sec.Name, &metav1.DeleteOptions{}); err != nil {
if err := clientset.CoreV1().Endpoints(sec.Namespace).Delete(context.Background(), sec.Name, metav1.DeleteOptions{}); err != nil {
if err := handleErr(fmt.Sprintf("error deleting endpoints: %+v: err: %v",
sec, err), err); err != nil {
return err
@ -540,7 +541,7 @@ func UpgradeServiceMonitor(
if o, ok := oldMap[n.Name]; ok {
n.UID = o.UID
n.ResourceVersion = o.ResourceVersion
ing, err := clientset.MonitoringV1().ServiceMonitors(n.Namespace).Update(n)
ing, err := clientset.MonitoringV1().ServiceMonitors(n.Namespace).Update(context.Background(), n, metav1.UpdateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error updating service monitor: %+v: err: %v",
ing, err), err); err != nil {
@ -552,7 +553,7 @@ func UpgradeServiceMonitor(
delete(oldMap, o.Name)
logrus.Debugf("ServiceID: %s; successfully update service monitor: %s", as.ServiceID, ing.Name)
} else {
ing, err := clientset.MonitoringV1().ServiceMonitors(n.Namespace).Create(n)
ing, err := clientset.MonitoringV1().ServiceMonitors(n.Namespace).Create(context.Background(), n, metav1.CreateOptions{})
if err != nil {
if err := handleErr(fmt.Sprintf("error creating service monitor: %+v: err: %v",
ing, err), err); err != nil {
@ -566,8 +567,8 @@ func UpgradeServiceMonitor(
}
for _, ing := range oldMap {
if ing != nil {
if err := clientset.MonitoringV1().ServiceMonitors(ing.Namespace).Delete(ing.Name,
&metav1.DeleteOptions{}); err != nil {
if err := clientset.MonitoringV1().ServiceMonitors(ing.Namespace).Delete(context.Background(), ing.Name,
metav1.DeleteOptions{}); err != nil {
if err := handleErr(fmt.Sprintf("error deleting service monitor: %+v: err: %v",
ing, err), err); err != nil {
return err
@ -582,18 +583,18 @@ func UpgradeServiceMonitor(
// CreateOrUpdateSecret creates or updates secret.
func CreateOrUpdateSecret(clientset kubernetes.Interface, secret *corev1.Secret) error {
old, err := clientset.CoreV1().Secrets(secret.Namespace).Get(secret.Name, metav1.GetOptions{})
old, err := clientset.CoreV1().Secrets(secret.Namespace).Get(context.Background(), secret.Name, metav1.GetOptions{})
if err != nil {
if !k8sErrors.IsNotFound(err) {
return err
}
// create secret
_, err := clientset.CoreV1().Secrets(secret.Namespace).Create(secret)
_, err := clientset.CoreV1().Secrets(secret.Namespace).Create(context.Background(), secret, metav1.CreateOptions{})
return err
}
// update secret
secret.ResourceVersion = old.ResourceVersion
_, err = clientset.CoreV1().Secrets(secret.Namespace).Update(secret)
_, err = clientset.CoreV1().Secrets(secret.Namespace).Update(context.Background(), secret, metav1.UpdateOptions{})
return err
}

View File

@ -3,10 +3,10 @@ package store
import (
"time"
externalversions "github.com/coreos/prometheus-operator/pkg/client/informers/externalversions"
"github.com/coreos/prometheus-operator/pkg/client/versioned"
externalversions "github.com/prometheus-operator/prometheus-operator/pkg/client/informers/externalversions"
"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
"github.com/sirupsen/logrus"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)
//ServiceMonitor service monitor custom resource

View File

@ -19,7 +19,7 @@
package store
import (
crdlisters "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/internalversion"
crdlisters "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1"
appsv1 "k8s.io/client-go/listers/apps/v1"
autoscalingv2 "k8s.io/client-go/listers/autoscaling/v2beta2"
corev1 "k8s.io/client-go/listers/core/v1"

View File

@ -29,10 +29,9 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
monitorv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/coreos/prometheus-operator/pkg/client/versioned"
"github.com/goodrain/rainbond/util/constants"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
"k8s.io/apimachinery/pkg/types"
"github.com/eapache/channels"
@ -51,8 +50,9 @@ import (
corev1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
internalinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
internalclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
internalinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -179,8 +179,8 @@ func NewStore(
if crdClient != nil {
store.crdClient = crdClient
crdFactory := internalinformers.NewSharedInformerFactory(crdClient, 5*time.Minute)
store.informers.CRD = crdFactory.Apiextensions().InternalVersion().CustomResourceDefinitions().Informer()
store.listers.CRD = crdFactory.Apiextensions().InternalVersion().CustomResourceDefinitions().Lister()
store.informers.CRD = crdFactory.Apiextensions().V1().CustomResourceDefinitions().Informer()
store.listers.CRD = crdFactory.Apiextensions().V1().CustomResourceDefinitions().Lister()
}
// create informers factory, enable and assign required informers
@ -407,13 +407,13 @@ func upgradeProbe(ch chan<- interface{}, old, cur []*ProbeInfo) {
func (a *appRuntimeStore) init() error {
//init leader namespace
leaderNamespace := a.conf.LeaderElectionNamespace
if _, err := a.conf.KubeClient.CoreV1().Namespaces().Get(leaderNamespace, metav1.GetOptions{}); err != nil {
if _, err := a.conf.KubeClient.CoreV1().Namespaces().Get(context.Background(), leaderNamespace, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
_, err = a.conf.KubeClient.CoreV1().Namespaces().Create(&corev1.Namespace{
_, err = a.conf.KubeClient.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: leaderNamespace,
},
})
}, metav1.CreateOptions{})
}
if err != nil {
return err
@ -498,7 +498,7 @@ func (a *appRuntimeStore) checkReplicasetWhetherDelete(app *v1.AppService, rs *a
//delete old version
if v1.GetReplicaSetVersion(current) > v1.GetReplicaSetVersion(rs) {
if rs.Status.Replicas == 0 && rs.Status.ReadyReplicas == 0 && rs.Status.AvailableReplicas == 0 {
if err := a.conf.KubeClient.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, &metav1.DeleteOptions{}); err != nil && errors.IsNotFound(err) {
if err := a.conf.KubeClient.AppsV1().ReplicaSets(rs.Namespace).Delete(context.Background(), rs.Name, metav1.DeleteOptions{}); err != nil && errors.IsNotFound(err) {
logrus.Errorf("delete old version replicaset failure %s", err.Error())
}
}
@ -514,7 +514,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.AppsV1().Deployments(deployment.Namespace).Delete(context.Background(), deployment.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetDeployment(deployment)
@ -529,7 +529,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.AppsV1().StatefulSets(statefulset.Namespace).Delete(statefulset.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.AppsV1().StatefulSets(statefulset.Namespace).Delete(context.Background(), statefulset.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetStatefulSet(statefulset)
@ -544,7 +544,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.AppsV1().Deployments(replicaset.Namespace).Delete(replicaset.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.AppsV1().Deployments(replicaset.Namespace).Delete(context.Background(), replicaset.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetReplicaSets(replicaset)
@ -560,7 +560,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.CoreV1().Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.CoreV1().Secrets(secret.Namespace).Delete(context.Background(), secret.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetSecret(secret)
@ -575,7 +575,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.CoreV1().Services(service.Namespace).Delete(service.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.CoreV1().Services(service.Namespace).Delete(context.Background(), service.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetService(service)
@ -590,7 +590,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Delete(ingress.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Delete(context.Background(), ingress.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetIngress(ingress)
@ -605,7 +605,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.CoreV1().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.CoreV1().ConfigMaps(configmap.Namespace).Delete(context.Background(), configmap.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetConfigMap(configmap)
@ -620,7 +620,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Delete(hpa.GetName(), &metav1.DeleteOptions{})
a.conf.KubeClient.AutoscalingV2beta2().HorizontalPodAutoscalers(hpa.GetNamespace()).Delete(context.Background(), hpa.GetName(), metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetHPA(hpa)
@ -645,7 +645,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
if serviceID != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.Background(), claim.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetClaim(claim)
@ -665,7 +665,7 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
logrus.Errorf("create service monitor client failure %s", err.Error())
}
if smClient != nil {
err := smClient.MonitoringV1().ServiceMonitors(sm.GetNamespace()).Delete(sm.GetName(), &metav1.DeleteOptions{})
err := smClient.MonitoringV1().ServiceMonitors(sm.GetNamespace()).Delete(context.Background(), sm.GetName(), metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
logrus.Errorf("delete service monitor failure: %s", err.Error())
}
@ -679,21 +679,6 @@ func (a *appRuntimeStore) OnAdd(obj interface{}) {
}
}
func (a *appRuntimeStore) listHPAEvents(hpa *autoscalingv2.HorizontalPodAutoscaler) error {
namespace, name := hpa.GetNamespace(), hpa.GetName()
eventsInterface := a.clientset.CoreV1().Events(hpa.GetNamespace())
selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil)
options := metav1.ListOptions{FieldSelector: selector.String()}
events, err := eventsInterface.List(options)
if err != nil {
return err
}
_ = events
return nil
}
//getAppService returns the app service for the given IDs; if creator is true, it creates a new app service when one is not found in the store
func (a *appRuntimeStore) getAppService(serviceID, version, createrID string, creator bool) (*v1.AppService, error) {
var appservice *v1.AppService
@ -1249,7 +1234,7 @@ func (a *appRuntimeStore) podEventHandler() cache.ResourceEventHandlerFuncs {
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetPods(pod)
@ -1277,7 +1262,7 @@ func (a *appRuntimeStore) podEventHandler() cache.ResourceEventHandlerFuncs {
if serviceID != "" && version != "" && createrID != "" {
appservice, err := a.getAppService(serviceID, version, createrID, true)
if err == conversion.ErrServiceNotFound {
a.conf.KubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
a.conf.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
}
if appservice != nil {
appservice.SetPods(pod)
@ -1519,7 +1504,7 @@ func (a *appRuntimeStore) createOrUpdateImagePullSecret(ns string) error {
Data: rawSecret.Data,
Type: rawSecret.Type,
}
_, err := a.clientset.CoreV1().Secrets(ns).Create(curSecret)
_, err := a.clientset.CoreV1().Secrets(ns).Create(context.Background(), curSecret, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("create secret for pulling images: %v", err)
}
@ -1536,7 +1521,7 @@ func (a *appRuntimeStore) createOrUpdateImagePullSecret(ns string) error {
// if the raw secret is different from the current one, then update the current one.
curSecret.Data = rawSecret.Data
if _, err := a.clientset.CoreV1().Secrets(ns).Update(curSecret); err != nil {
if _, err := a.clientset.CoreV1().Secrets(ns).Update(context.Background(), curSecret, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("update secret for pulling images: %v", err)
}
logrus.Infof("successfully update secret: %s", types.NamespacedName{Namespace: ns, Name: imagePullSecretName}.String())

View File

@ -19,7 +19,9 @@
package store
import (
"github.com/goodrain/rainbond/worker/appm/types/v1"
"context"
v1 "github.com/goodrain/rainbond/worker/appm/types/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -27,9 +29,9 @@ import (
//initStorageclass initializes the built-in storage classes
func (a *appRuntimeStore) initStorageclass() error {
for _, storageclass := range v1.GetInitStorageClass() {
if _, err := a.conf.KubeClient.StorageV1().StorageClasses().Get(storageclass.Name, metav1.GetOptions{}); err != nil {
if _, err := a.conf.KubeClient.StorageV1().StorageClasses().Get(context.Background(), storageclass.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
_, err = a.conf.KubeClient.StorageV1().StorageClasses().Create(storageclass)
_, err = a.conf.KubeClient.StorageV1().StorageClasses().Create(context.Background(), storageclass, metav1.CreateOptions{})
}
if err != nil {
return err

View File

@ -19,6 +19,7 @@
package thirdparty
import (
"context"
"fmt"
"github.com/eapache/channels"
@ -538,7 +539,7 @@ func (t *thirdparty) runDelete(sid string) {
if eps := as.GetEndpoints(true); eps != nil {
for _, ep := range eps {
logrus.Debugf("Endpoints delete: %+v", ep)
err := t.clientset.CoreV1().Endpoints(as.TenantID).Delete(ep.Name, &metav1.DeleteOptions{})
err := t.clientset.CoreV1().Endpoints(as.TenantID).Delete(context.Background(), ep.Name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
logrus.Warningf("error deleting endpoint empty old app endpoints: %v", err)
}

View File

@ -23,7 +23,7 @@ import (
"os"
"strconv"
monitorv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
monitorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"

View File

@ -19,6 +19,7 @@
package gc
import (
"context"
"fmt"
"os"
"path"
@ -79,53 +80,53 @@ func (g *GarbageCollector) DelVolumeData(serviceGCReq model.ServiceGCTaskBody) {
// DelPvPvcByServiceID -
func (g *GarbageCollector) DelPvPvcByServiceID(serviceGCReq model.ServiceGCTaskBody) {
logrus.Infof("service_id: %s", serviceGCReq.ServiceID)
deleteOpts := &metav1.DeleteOptions{}
deleteOpts := metav1.DeleteOptions{}
listOpts := g.listOptionsServiceID(serviceGCReq.ServiceID)
if err := g.clientset.CoreV1().PersistentVolumes().DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.CoreV1().PersistentVolumes().DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("service id: %s; delete a collection fo PV: %v", serviceGCReq.ServiceID, err)
}
if err := g.clientset.CoreV1().PersistentVolumeClaims(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.CoreV1().PersistentVolumeClaims(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("service id: %s; delete a collection fo PVC: %v", serviceGCReq.ServiceID, err)
}
}
// DelKubernetesObjects deletes all kubernetes objects.
func (g *GarbageCollector) DelKubernetesObjects(serviceGCReq model.ServiceGCTaskBody) {
deleteOpts := &metav1.DeleteOptions{}
deleteOpts := metav1.DeleteOptions{}
listOpts := g.listOptionsServiceID(serviceGCReq.ServiceID)
if err := g.clientset.AppsV1().Deployments(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.AppsV1().Deployments(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete deployments(%s): %v", serviceGCReq.ServiceID, err)
}
if err := g.clientset.AppsV1().StatefulSets(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.AppsV1().StatefulSets(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete statefulsets(%s): %v", serviceGCReq.ServiceID, err)
}
if err := g.clientset.ExtensionsV1beta1().Ingresses(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.ExtensionsV1beta1().Ingresses(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete ingresses(%s): %v", serviceGCReq.ServiceID, err)
}
if err := g.clientset.CoreV1().Secrets(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.CoreV1().Secrets(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete secrets(%s): %v", serviceGCReq.ServiceID, err)
}
if err := g.clientset.CoreV1().ConfigMaps(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.CoreV1().ConfigMaps(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete configmaps(%s): %v", serviceGCReq.ServiceID, err)
}
if err := g.clientset.AutoscalingV2beta2().HorizontalPodAutoscalers(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.AutoscalingV2beta2().HorizontalPodAutoscalers(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete hpas(%s): %v", serviceGCReq.ServiceID, err)
}
// kubernetes does not support api for deleting collection of service
// read: https://github.com/kubernetes/kubernetes/issues/68468#issuecomment-419981870
serviceList, err := g.clientset.CoreV1().Services(serviceGCReq.TenantID).List(listOpts)
serviceList, err := g.clientset.CoreV1().Services(serviceGCReq.TenantID).List(context.Background(), listOpts)
if err != nil {
logrus.Warningf("[DelKubernetesObjects] list services(%s): %v", serviceGCReq.ServiceID, err)
} else {
for _, svc := range serviceList.Items {
if err := g.clientset.CoreV1().Services(serviceGCReq.TenantID).Delete(svc.Name, deleteOpts); err != nil {
if err := g.clientset.CoreV1().Services(serviceGCReq.TenantID).Delete(context.Background(), svc.Name, deleteOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete service(%s): %v", svc.GetName(), err)
}
}
}
// delete endpoints after deleting services
if err := g.clientset.CoreV1().Endpoints(serviceGCReq.TenantID).DeleteCollection(deleteOpts, listOpts); err != nil {
if err := g.clientset.CoreV1().Endpoints(serviceGCReq.TenantID).DeleteCollection(context.Background(), deleteOpts, listOpts); err != nil {
logrus.Warningf("[DelKubernetesObjects] delete endpoints(%s): %v", serviceGCReq.ServiceID, err)
}
}

View File

@ -523,7 +523,7 @@ func (m *Manager) deleteTenant(task *model.Task) (err error) {
}
}()
if err = m.cfg.KubeClient.CoreV1().Namespaces().Delete(body.TenantID, &metav1.DeleteOptions{
if err = m.cfg.KubeClient.CoreV1().Namespaces().Delete(context.Background(), body.TenantID, metav1.DeleteOptions{
GracePeriodSeconds: util.Int64(0),
}); err != nil && !k8sErrors.IsNotFound(err) {
err = fmt.Errorf("delete namespace: %v", err)

View File

@ -21,7 +21,6 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/goodrain/rainbond/cmd/worker/option"
"io/ioutil"
"net/http"
"os"
@ -29,6 +28,8 @@ import (
"sync"
"time"
"github.com/goodrain/rainbond/cmd/worker/option"
"github.com/sirupsen/logrus"
"golang.org/x/time/rate"
v1 "k8s.io/api/core/v1"
@ -773,7 +774,9 @@ func (ctrl *ProvisionController) syncClaim(obj interface{}) error {
if ctrl.shouldProvision(claim) {
startTime := time.Now()
err := ctrl.provisionClaimOperation(claim)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*100)
defer cancel()
err := ctrl.provisionClaimOperation(ctx, claim)
ctrl.updateProvisionStats(claim, err, startTime)
return err
}
@ -786,10 +789,11 @@ func (ctrl *ProvisionController) syncVolume(obj interface{}) error {
if !ok {
return fmt.Errorf("expected volume but got %+v", obj)
}
if ctrl.shouldDelete(volume) {
startTime := time.Now()
err := ctrl.deleteVolumeOperation(volume)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
err := ctrl.deleteVolumeOperation(ctx, volume)
ctrl.updateDeleteStats(volume, err, startTime)
return err
}
@ -927,7 +931,7 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
// provisionClaimOperation attempts to provision a volume for the given claim.
// Returns error, which indicates whether provisioning should be retried
// (requeue the claim) or not
func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) error {
func (ctrl *ProvisionController) provisionClaimOperation(ctx context.Context, claim *v1.PersistentVolumeClaim) error {
// Most code here is identical to that found in controller.go of kube's PV controller...
claimClass := GetPersistentVolumeClaimClass(claim)
operation := fmt.Sprintf("provision %q class %q", claimToClaimKey(claim), claimClass)
@ -937,7 +941,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol
// the locks. Check that PV (with deterministic name) hasn't been provisioned
// yet.
pvName := ctrl.getProvisionedVolumeNameForClaim(claim)
volume, err := ctrl.client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
volume, err := ctrl.client.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err == nil && volume != nil {
// Volume has been already provisioned, nothing to do.
logrus.Info(logOperation(operation, "persistentvolume %q already exists, skipping", pvName))
@ -990,7 +994,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol
if ctrl.kubeVersion.AtLeast(utilversion.MustParseSemantic("v1.11.0")) {
// Get SelectedNode
if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
selectedNode, err = ctrl.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) // TODO (verult) cache Nodes
selectedNode, err = ctrl.client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) // TODO (verult) cache Nodes
if err != nil {
err = fmt.Errorf("failed to get target node: %v", err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, "ProvisioningFailed", err.Error())
@ -1008,7 +1012,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol
}
// Find pv for grdata
grdatapv, err := ctrl.persistentVolumeForGrdata()
grdatapv, err := ctrl.persistentVolumeForGrdata(ctx)
if err != nil {
return fmt.Errorf("pv for grdata: %v", err)
}
@ -1057,7 +1061,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol
// Try to create the PV object several times
for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ {
logrus.Info(logOperation(operation, "trying to save persistentvolume %q", volume.Name))
if _, err = ctrl.client.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) {
if _, err = ctrl.client.CoreV1().PersistentVolumes().Create(ctx, volume, metav1.CreateOptions{}); err == nil || apierrs.IsAlreadyExists(err) {
// Save succeeded.
if err != nil {
logrus.Info(logOperation(operation, "persistentvolume %q already exists, reusing", volume.Name))
@ -1108,12 +1112,12 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol
return nil
}
func (ctrl *ProvisionController) persistentVolumeForGrdata() (*v1.PersistentVolume, error) {
pvc, err := ctrl.client.CoreV1().PersistentVolumeClaims(ctrl.cfg.RBDNamespace).Get(ctrl.cfg.GrdataPVCName, metav1.GetOptions{})
func (ctrl *ProvisionController) persistentVolumeForGrdata(ctx context.Context) (*v1.PersistentVolume, error) {
pvc, err := ctrl.client.CoreV1().PersistentVolumeClaims(ctrl.cfg.RBDNamespace).Get(ctx, ctrl.cfg.GrdataPVCName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("find pvc for grdata: %v", err)
}
pv, err := ctrl.client.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
pv, err := ctrl.client.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("find pv for grdata: %v", err)
}
@ -1126,7 +1130,7 @@ func (ctrl *ProvisionController) persistentVolumeForGrdata() (*v1.PersistentVolu
// deleteVolumeOperation attempts to delete the volume backing the given
// volume. Returns error, which indicates whether deletion should be retried
// (requeue the volume) or not
func (ctrl *ProvisionController) deleteVolumeOperation(volume *v1.PersistentVolume) error {
func (ctrl *ProvisionController) deleteVolumeOperation(ctx context.Context, volume *v1.PersistentVolume) error {
operation := fmt.Sprintf("delete %q", volume.Name)
logrus.Info(logOperation(operation, "started"))
@ -1134,7 +1138,7 @@ func (ctrl *ProvisionController) deleteVolumeOperation(volume *v1.PersistentVolu
// Our check does not have to be as sophisticated as PV controller's, we can
// trust that the PV controller has set the PV to Released/Failed and it's
// ours to delete
newVolume, err := ctrl.client.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
newVolume, err := ctrl.client.CoreV1().PersistentVolumes().Get(ctx, volume.Name, metav1.GetOptions{})
if err != nil {
return nil
}
@ -1159,7 +1163,7 @@ func (ctrl *ProvisionController) deleteVolumeOperation(volume *v1.PersistentVolu
logrus.Info(logOperation(operation, "volume deleted"))
// Delete the volume
if err = ctrl.client.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil {
if err = ctrl.client.CoreV1().PersistentVolumes().Delete(ctx, volume.Name, metav1.DeleteOptions{}); err != nil {
// Oops, could not delete the volume and therefore the controller will
// try to delete the volume again on next update.
logrus.Info(logOperation(operation, "failed to delete persistentvolume: %v", err))

View File

@ -26,6 +26,8 @@ import (
"strings"
"time"
"context"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/dao"
"github.com/goodrain/rainbond/node/nodem/client"
@ -61,8 +63,8 @@ func NewRainbondsslcProvisioner(kubecli kubernetes.Interface, store store.Storer
var _ controller.Provisioner = &rainbondsslcProvisioner{}
//selectNode select an appropriate node with the largest resource surplus
func (p *rainbondsslcProvisioner) selectNode(nodeOS, ignore string) (*v1.Node, error) {
allnode, err := p.kubecli.CoreV1().Nodes().List(metav1.ListOptions{})
func (p *rainbondsslcProvisioner) selectNode(ctx context.Context, nodeOS, ignore string) (*v1.Node, error) {
allnode, err := p.kubecli.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
@ -97,7 +99,7 @@ func (p *rainbondsslcProvisioner) selectNode(nodeOS, ignore string) (*v1.Node, e
}
//only contains rainbond pod
//pods, err := p.store.GetPodLister().Pods(v1.NamespaceAll).List(labels.NewSelector())
pods, err := p.kubecli.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
pods, err := p.kubecli.CoreV1().Pods(v1.NamespaceAll).List(ctx, metav1.ListOptions{
FieldSelector: "spec.nodeName=" + node.Name,
})
if err != nil {
@ -199,7 +201,7 @@ func (p *rainbondsslcProvisioner) Provision(options controller.VolumeOptions) (*
if options.Parameters != nil {
ignoreNodes = options.Parameters["ignoreNodes"]
}
options.SelectedNode, err = p.selectNode(options.PVC.Annotations[client.LabelOS], ignoreNodes)
options.SelectedNode, err = p.selectNode(context.Background(), options.PVC.Annotations[client.LabelOS], ignoreNodes)
if err != nil {
return nil, fmt.Errorf("Node OS: %s; error selecting node: %v",
options.PVC.Annotations[client.LabelOS], err)

View File

@ -19,6 +19,7 @@
package provider
import (
"context"
"testing"
"k8s.io/client-go/tools/clientcmd"
@ -36,7 +37,7 @@ func TestSelectNode(t *testing.T) {
name: "rainbond.io/provisioner-sslc",
kubecli: client,
}
node, err := pr.selectNode("linux", "")
node, err := pr.selectNode(context.TODO(), "linux", "")
if err != nil {
t.Fatal(err)
}

View File

@ -114,7 +114,7 @@ func (r RuntimeServer) listPodEventsByName(name, namespace string) []*pb.PodEven
eventsInterface := r.clientset.CoreV1().Events(namespace)
selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil)
options := metav1.ListOptions{FieldSelector: selector.String()}
events, err := eventsInterface.List(options)
events, err := eventsInterface.List(context.Background(), options)
if err == nil && len(events.Items) > 0 {
podEvents := DescribeEvents(events)
return podEvents

View File

@ -1,6 +1,7 @@
package util
import (
"context"
"fmt"
"github.com/sirupsen/logrus"
@ -22,14 +23,14 @@ func dns2Config(endpoint *corev1.Endpoints, podNamespace string) (podDNSConfig *
ndotsValue := "5"
return &corev1.PodDNSConfig{
Nameservers: servers,
Options: []corev1.PodDNSConfigOption{corev1.PodDNSConfigOption{Name: "ndots", Value: &ndotsValue}},
Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: &ndotsValue}},
Searches: []string{searchRBDDNS, "svc.cluster.local", "cluster.local"},
}
}
// MakePodDNSConfig make pod dns config
func MakePodDNSConfig(clientset kubernetes.Interface, podNamespace, rbdNamespace, rbdEndpointDNSName string) (podDNSConfig *corev1.PodDNSConfig) {
endpoints, err := clientset.CoreV1().Endpoints(rbdNamespace).Get(rbdEndpointDNSName, metav1.GetOptions{})
endpoints, err := clientset.CoreV1().Endpoints(rbdNamespace).Get(context.Background(), rbdEndpointDNSName, metav1.GetOptions{})
if err != nil {
logrus.Warningf("get rbd-dns[namespace: %s, name: %s] endpoints error: %s", rbdNamespace, rbdEndpointDNSName, err.Error())
return nil