diff --git a/api/handler/nodes.go b/api/handler/nodes.go
index d73bfe3b6..97b89d54e 100644
--- a/api/handler/nodes.go
+++ b/api/handler/nodes.go
@@ -28,6 +28,8 @@ import (
 const (
     // NodeRolesLabelPrefix -
     NodeRolesLabelPrefix = "node-role.kubernetes.io"
+    // NodeRolesLabel -
+    NodeRolesLabel = "kubernetes.io/role"
     // NodeInternalIP -
     NodeInternalIP = "InternalIP"
     // NodeExternalIP -
@@ -151,12 +153,16 @@ func (n *nodesHandle) HandleNodeInfo(node v1.Node) (nodeinfo model.NodeInfo, err
     }
     // get node roles
     var roles []string
-    for k := range node.Labels {
+    for k, v := range node.Labels {
         if strings.HasPrefix(k, NodeRolesLabelPrefix) {
             // string handle : node-role.kubernetes.io/worker: "true"
             role := strings.Split(k, "/")[1]
             roles = append(roles, role)
         }
+        if strings.HasPrefix(k, NodeRolesLabel) {
+            // string handle : kubernetes.io/role: master
+            roles = append(roles, v)
+        }
         continue
     }
     // req resource from Prometheus
diff --git a/api/handler/resource.go b/api/handler/resource.go
index d91779c3c..6339812b2 100644
--- a/api/handler/resource.go
+++ b/api/handler/resource.go
@@ -17,11 +17,12 @@ import (
     yamlt "k8s.io/apimachinery/pkg/util/yaml"
     "k8s.io/client-go/dynamic"
     "k8s.io/client-go/restmapper"
+    "strings"
 )

 // AddAppK8SResource -
 func (c *clusterAction) AddAppK8SResource(ctx context.Context, namespace string, appID string, resourceYaml string) ([]*dbmodel.K8sResource, *util.APIHandleError) {
-    resourceObjects := c.HandleResourceYaml([]byte(resourceYaml), namespace, "create", "", map[string]string{"app_id": appID})
+    resourceObjects := c.HandleResourceYaml([]byte(strings.TrimPrefix(resourceYaml, "\n")), namespace, "create", "", map[string]string{"app_id": appID})
     var resourceList []*dbmodel.K8sResource
     for _, resourceObject := range resourceObjects {
         resource := resourceObject
diff --git a/api/handler/yaml_resource.go b/api/handler/yaml_resource.go
index a70dfc160..169e3af21 100644
--- a/api/handler/yaml_resource.go
+++ b/api/handler/yaml_resource.go
@@ -357,11 +357,12 @@ func (c *clusterAction) YamlToResource(yamlResource api_model.YamlResource, yaml
     var fileBuildResourceList []api_model.K8sResourceObject
     for _, yamlFilePath := range yamlFilesPath {
         var fileName string
-        yamlFileBytes := []byte(yamlContent)
+        yamlFileBytes := []byte(strings.TrimPrefix(yamlContent, "\n"))
         if yamlSource == api_model.YamlSourceFile {
             fileName = path.Base(yamlFilePath)
             var err error
             yamlFileBytes, err = ioutil.ReadFile(yamlFilePath)
+            yamlFileBytes = []byte(strings.TrimPrefix(string(yamlFileBytes), "\n"))
             if err != nil {
                 logrus.Errorf("%v", err)
                 fileBuildResourceList = append(fileBuildResourceList, api_model.K8sResourceObject{
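Both API-side hunks above strip a single leading newline before the YAML reaches the decoder. Note that strings.TrimPrefix removes at most one occurrence of the prefix, which appears intentional here; a runnable illustration of how it differs from strings.TrimLeft:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Illustrative payload with two leading blank lines.
        yaml := "\n\napiVersion: v1"
        // TrimPrefix, as used in the hunks above, removes at most one "\n".
        fmt.Printf("%q\n", strings.TrimPrefix(yaml, "\n")) // "\napiVersion: v1"
        // TrimLeft would strip every leading newline.
        fmt.Printf("%q\n", strings.TrimLeft(yaml, "\n")) // "apiVersion: v1"
    }
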
diff --git a/builder/build/build.go b/builder/build/build.go
index 58953dd7a..52f87237f 100644
--- a/builder/build/build.go
+++ b/builder/build/build.go
@@ -82,6 +82,7 @@ type Response struct {
 //Request build input
 type Request struct {
     KanikoImage   string
+    InsecureBuild bool
     RbdNamespace  string
     GRDataPVCName string
     CachePVCName  string
diff --git a/builder/build/code_build.go b/builder/build/code_build.go
index cf93561ba..9dfd25e4f 100644
--- a/builder/build/code_build.go
+++ b/builder/build/code_build.go
@@ -142,7 +142,7 @@ func (s *slugBuild) buildRunnerImage(slugPackage string) (string, error) {
         return "", fmt.Errorf("pull image %s: %v", builder.RUNNERIMAGENAME, err)
     }
     logrus.Infof("pull image %s successfully.", builder.RUNNERIMAGENAME)
-    err := sources.ImageBuild(cacheDir, s.re.RbdNamespace, s.re.ServiceID, s.re.DeployVersion, s.re.Logger, "run-build", "", s.re.KanikoImage)
+    err := sources.ImageBuild(cacheDir, s.re.RbdNamespace, s.re.ServiceID, s.re.DeployVersion, s.re.Logger, "run-build", "", s.re.KanikoImage, s.re.InsecureBuild)
     if err != nil {
         s.re.Logger.Error(fmt.Sprintf("build image %s of new version failure", imageName), map[string]string{"step": "builder-exector", "status": "failure"})
         logrus.Errorf("build image error: %s", err.Error())
diff --git a/builder/build/dockerfile_build.go b/builder/build/dockerfile_build.go
index 7cf97cd59..77efdd26c 100644
--- a/builder/build/dockerfile_build.go
+++ b/builder/build/dockerfile_build.go
@@ -114,7 +114,7 @@ func (d *dockerfileBuild) runBuildJob(re *Request, buildImageName string) error
         Image:     re.KanikoImage,
         Stdin:     true,
         StdinOnce: true,
-        Args:      []string{fmt.Sprintf("--context=%v", re.SourceDir), fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify"},
+        Args:      []string{fmt.Sprintf("--context=%v", re.SourceDir), fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify", fmt.Sprintf("--insecure-pull=%v", re.InsecureBuild), fmt.Sprintf("--insecure=%v", re.InsecureBuild)},
     }
     container.VolumeMounts = mounts
     podSpec.Containers = append(podSpec.Containers, container)
diff --git a/builder/build/netcore_build.go b/builder/build/netcore_build.go
index 59b0aeee0..33e074ecb 100644
--- a/builder/build/netcore_build.go
+++ b/builder/build/netcore_build.go
@@ -31,14 +31,14 @@ import (
 )

 var dockerfileTmpl = `
-FROM microsoft/dotnet:${DOTNET_SDK_VERSION:2.2-sdk-alpine} AS builder
+FROM mcr.microsoft.com/dotnet/sdk:${DOTNET_SDK_VERSION:2.1-alpine} AS builder
 WORKDIR /app

 # copy csproj and restore as distinct layers
 COPY . .
 RUN ${DOTNET_RESTORE_PRE} && ${DOTNET_RESTORE:dotnet restore} && dotnet publish -c Release -o /out

-FROM microsoft/dotnet:${DOTNET_RUNTIME_VERSION:2.2-aspnetcore-runtime-alpine}
+FROM mcr.microsoft.com/dotnet/aspnet:${DOTNET_RUNTIME_VERSION:2.1-alpine}
 WORKDIR /app
 COPY --from=builder /out/ .
 CMD ["dotnet"]
@@ -71,7 +71,7 @@ func (d *netcoreBuild) Build(re *Request) (*Response, error) {
         return nil, fmt.Errorf("write default dockerfile error:%s", err.Error())
     }
     // build image
-    err := sources.ImageBuild(d.sourceDir, re.RbdNamespace, re.ServiceID, re.DeployVersion, re.Logger, "run-build", "", re.KanikoImage)
+    err := sources.ImageBuild(d.sourceDir, re.RbdNamespace, re.ServiceID, re.DeployVersion, re.Logger, "nc-build", "", re.KanikoImage, re.InsecureBuild)
     if err != nil {
         re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
         logrus.Errorf("build image error: %s", err.Error())
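The call-site changes above all follow one pattern: the new InsecureBuild switch reaches kaniko as a pair of executor flags, --insecure for pushing to a plain-HTTP registry and --insecure-pull for pulling base images from one, alongside the existing --skip-tls-verify. A runnable sketch of the resulting argument list (the helper name and registry address are illustrative, not part of the patch):

    package main

    import "fmt"

    // kanikoArgs mirrors how the hunks above assemble the executor's flags.
    func kanikoArgs(contextDir, destination string, insecure bool) []string {
        return []string{
            fmt.Sprintf("--context=%v", contextDir),
            fmt.Sprintf("--destination=%s", destination),
            "--skip-tls-verify",
            fmt.Sprintf("--insecure-pull=%v", insecure),
            fmt.Sprintf("--insecure=%v", insecure),
        }
    }

    func main() {
        fmt.Println(kanikoArgs("/cache/build", "registry.example.com/app:v1", true))
    }
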
diff --git a/builder/exector/build_from_sourcecode_run.go b/builder/exector/build_from_sourcecode_run.go
index e89c632a2..ca33acd30 100644
--- a/builder/exector/build_from_sourcecode_run.go
+++ b/builder/exector/build_from_sourcecode_run.go
@@ -61,6 +61,7 @@ type SourceCodeBuildItem struct {
     TGZDir        string `json:"tgz_dir"`
     ImageClient   sources.ImageClient
     KanikoImage   string
+    InsecureBuild bool
     KubeClient    kubernetes.Interface
     RbdNamespace  string
     RbdRepoName   string
@@ -303,6 +304,7 @@ func (i *SourceCodeBuildItem) codeBuild() (*build.Response, error) {
         return nil, err
     }
     buildReq := &build.Request{
+        InsecureBuild: i.InsecureBuild,
         KanikoImage:   i.KanikoImage,
         RbdNamespace:  i.RbdNamespace,
         SourceDir:     i.RepoInfo.GetCodeBuildAbsPath(),
diff --git a/builder/exector/exector.go b/builder/exector/exector.go
index 676579e14..12c571082 100644
--- a/builder/exector/exector.go
+++ b/builder/exector/exector.go
@@ -118,6 +118,7 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
     logrus.Infof("The maximum number of concurrent build tasks supported by the current node is %d", maxConcurrentTask)
     return &exectorManager{
         KanikoImage:   conf.KanikoImage,
+        InsecureBuild: conf.InsecureBuild,
         KubeClient:    kubeClient,
         EtcdCli:       etcdCli,
         mqClient:      mqc,
@@ -132,6 +133,7 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {

 type exectorManager struct {
     KanikoImage   string
+    InsecureBuild bool
     KubeClient    kubernetes.Interface
     EtcdCli       *clientv3.Client
     tasks         chan *pb.TaskMessage
@@ -334,6 +336,7 @@ func (e *exectorManager) buildFromSourceCode(task *pb.TaskMessage) {
     i := NewSouceCodeBuildItem(task.TaskBody)
     i.ImageClient = e.imageClient
     i.KanikoImage = e.KanikoImage
+    i.InsecureBuild = e.InsecureBuild
     i.KubeClient = e.KubeClient
     i.RbdNamespace = e.cfg.RbdNamespace
     i.RbdRepoName = e.cfg.RbdRepoName
@@ -452,15 +455,15 @@ func (e *exectorManager) sendAction(tenantID, serviceID, eventID, newVersion, ac
     case "upgrade":
         //add upgrade event
         event := &dbmodel.ServiceEvent{
-            EventID:     util.NewUUID(),
-            TenantID:    tenantID,
-            ServiceID:   serviceID,
-            StartTime:   time.Now().Format(time.RFC3339),
-            OptType:     "upgrade",
-            Target:      "service",
-            TargetID:    serviceID,
-            UserName:    "",
-            SynType:     dbmodel.ASYNEVENTTYPE,
+            EventID:   util.NewUUID(),
+            TenantID:  tenantID,
+            ServiceID: serviceID,
+            StartTime: time.Now().Format(time.RFC3339),
+            OptType:   "upgrade",
+            Target:    "service",
+            TargetID:  serviceID,
+            UserName:  "",
+            SynType:   dbmodel.ASYNEVENTTYPE,
         }
         if err := db.GetManager().ServiceEventDao().AddModel(event); err != nil {
             logrus.Errorf("create upgrade event failure %s, service %s do not auto upgrade", err.Error(), serviceID)
diff --git a/builder/exector/plugin_dockerfile.go b/builder/exector/plugin_dockerfile.go
index d9058c671..954ee7630 100644
--- a/builder/exector/plugin_dockerfile.go
+++ b/builder/exector/plugin_dockerfile.go
@@ -105,7 +105,7 @@ func (e *exectorManager) runD(t *model.BuildPluginTaskBody, logger event.Logger)
     n1 := strings.Split(mm[len(mm)-1], ".")[0]
     buildImageName := fmt.Sprintf(builder.REGISTRYDOMAIN+"/plugin_%s_%s:%s", n1, t.PluginID, t.DeployVersion)
     logger.Info("start build image", map[string]string{"step": "builder-exector"})
-    err := sources.ImageBuild(sourceDir, "rbd-system", t.PluginID, t.DeployVersion, logger, "plug-build", buildImageName, e.KanikoImage)
+    err := sources.ImageBuild(sourceDir, "rbd-system", t.PluginID, t.DeployVersion, logger, "plug-build", buildImageName, e.KanikoImage, e.InsecureBuild)
     if err != nil {
         logger.Error(fmt.Sprintf("build image %s failure,find log in rbd-chaos", buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
         logrus.Errorf("[plugin]build image error: %s", err.Error())
diff --git a/builder/sources/image.go b/builder/sources/image.go
index d61ebe209..c575029a6 100644
--- a/builder/sources/image.go
+++ b/builder/sources/image.go
@@ -390,7 +390,7 @@ func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
 }

 //ImageBuild use kaniko build image
-func ImageBuild(contextDir, RbdNamespace, ServiceID, DeployVersion string, logger event.Logger, buildType, plugImageName, KanikoImage string) error {
+func ImageBuild(contextDir, RbdNamespace, ServiceID, DeployVersion string, logger event.Logger, buildType, plugImageName, KanikoImage string, InsecureBuild bool) error {
     // create image name
     var buildImageName string
     if buildType == "plug-build" {
@@ -430,7 +430,7 @@ func ImageBuild(contextDir, RbdNamespace, ServiceID, DeployVersion string, logge
         Image:     KanikoImage,
         Stdin:     true,
         StdinOnce: true,
-        Args:      []string{"--context=dir:///workspace", fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify"},
+        Args:      []string{"--context=dir:///workspace", fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify", fmt.Sprintf("--insecure-pull=%v", InsecureBuild), fmt.Sprintf("--insecure=%v", InsecureBuild)},
     }
     container.VolumeMounts = volumeMounts
     podSpec.Containers = append(podSpec.Containers, container)
@@ -703,6 +703,7 @@ func printLog(logger event.Logger, level, msg string, info map[string]string) {
     }
 }

+// CreateImageName -
 func CreateImageName(ServiceID, DeployVersion string) string {
     imageName := strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, ServiceID, DeployVersion))
     logrus.Info("imageName:", imageName)
@@ -725,6 +726,7 @@ func CreateImageName(ServiceID, DeployVersion string) string {
     return strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, workloadName, DeployVersion))
 }

+// CreateVolumesAndMounts -
 func CreateVolumesAndMounts(contextDir, buildType string) (volumes []corev1.Volume, volumeMounts []corev1.VolumeMount) {
     pathSplit := strings.Split(contextDir, "/")
     subPath := strings.Join(pathSplit[2:], "/")
@@ -784,6 +786,24 @@ func CreateVolumesAndMounts(contextDir, buildType string) (volumes []corev1.Volu
         }
         volumeMounts = append(volumeMounts, volumeMount)
     }
+    if buildType == "nc-build" {
+        volume := corev1.Volume{
+            Name: "nc-build",
+            VolumeSource: corev1.VolumeSource{
+                HostPath: &corev1.HostPathVolumeSource{
+                    Path: "/cache",
+                    Type: &hostPathType,
+                },
+            },
+        }
+        volumes = append(volumes, volume)
+        volumeMount := corev1.VolumeMount{
+            Name:      "nc-build",
+            MountPath: "/workspace",
+            SubPath:   subPath,
+        }
+        volumeMounts = append(volumeMounts, volumeMount)
+    }
     if buildType == "run-build" {
         volume := corev1.Volume{
             Name: "run-build",
@@ -804,6 +824,7 @@ func CreateVolumesAndMounts(contextDir, buildType string) (volumes []corev1.Volu
     return volumes, volumeMounts
 }

+// WaitingComplete -
 func WaitingComplete(reChan *channels.RingChannel) (err error) {
     var logComplete = false
     var jobComplete = false
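The new nc-build branch in CreateVolumesAndMounts gives the .NET build job a hostPath-backed workspace: the host's /cache/<subPath> surfaces at /workspace inside the kaniko pod, which lines up with the --context=dir:///workspace argument above. A self-contained sketch of that pairing (the hostPathType and subPath values here are assumptions for illustration; in image.go they come from the surrounding code):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // Assumed host-path type; image.go defines its own hostPathType.
        hostPathType := corev1.HostPathDirectoryOrCreate
        subPath := "build/tenant/service" // illustrative
        volume := corev1.Volume{
            Name: "nc-build",
            VolumeSource: corev1.VolumeSource{
                HostPath: &corev1.HostPathVolumeSource{Path: "/cache", Type: &hostPathType},
            },
        }
        mount := corev1.VolumeMount{Name: "nc-build", MountPath: "/workspace", SubPath: subPath}
        fmt.Printf("host %s/%s -> container %s\n", volume.HostPath.Path, mount.SubPath, mount.MountPath)
    }
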
diff --git a/cmd/builder/option/option.go b/cmd/builder/option/option.go
index 2f259940f..d43870867 100644
--- a/cmd/builder/option/option.go
+++ b/cmd/builder/option/option.go
@@ -39,6 +39,7 @@ type Config struct {
     ClusterName          string
     MysqlConnectionInfo  string
     KanikoImage          string
+    InsecureBuild        bool
     DBType               string
     PrometheusMetricPath string
     EventLogServers      []string
@@ -105,6 +106,7 @@ func (a *Builder) AddFlags(fs *pflag.FlagSet) {
     fs.StringVar(&a.CachePath, "cache-path", "/cache", "volume cache mount path, when cache-mode using hostpath, default path is /cache")
     fs.StringVar(&a.ContainerRuntime, "container-runtime", sources.ContainerRuntimeContainerd, "container runtime, support docker and containerd")
     fs.StringVar(&a.RuntimeEndpoint, "runtime-endpoint", sources.RuntimeEndpointContainerd, "container runtime endpoint")
+    fs.BoolVar(&a.InsecureBuild, "insecure-build", false, "kaniko build image policy http or https, true is http")
 }

 //SetLog 设置log
diff --git a/node/nodem/envoy/conver/cds_conver.go b/node/nodem/envoy/conver/cds_conver.go
index 3094df1ac..b6d7e97e1 100644
--- a/node/nodem/envoy/conver/cds_conver.go
+++ b/node/nodem/envoy/conver/cds_conver.go
@@ -35,7 +35,7 @@ import (
     corev1 "k8s.io/api/core/v1"
 )

-//OneNodeCluster conver cluster of on envoy node
+// OneNodeCluster conver cluster of on envoy node
 func OneNodeCluster(serviceAlias, namespace string, configs *corev1.ConfigMap, services []*corev1.Service) ([]types.Resource, error) {
     resources, _, err := GetPluginConfigs(configs)
     if err != nil {
@@ -147,8 +147,8 @@ func transportSocket(name, domain string) *core.TransportSocket {
     }
 }

-//downstreamClusters handle app self cluster
-//only local port
+// downstreamClusters handle app self cluster
+// only local port
 func downstreamClusters(serviceAlias, namespace string, ports []*api_model.BasePort) (cdsClusters []*v2.Cluster) {
     for i := range ports {
         port := ports[i]
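For reference, a minimal runnable sketch of how the new --insecure-build flag parses (the flag set name and argument vector are illustrative only):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("builder", pflag.ExitOnError)
        insecure := fs.Bool("insecure-build", false, "kaniko build image policy http or https, true is http")
        _ = fs.Parse([]string{"--insecure-build=true"})
        fmt.Println(*insecure) // true
    }
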
diff --git a/node/nodem/envoy/conver/eds_conver.go b/node/nodem/envoy/conver/eds_conver.go
index 1e0b43748..322aaac45 100644
--- a/node/nodem/envoy/conver/eds_conver.go
+++ b/node/nodem/envoy/conver/eds_conver.go
@@ -31,7 +31,7 @@ import (
     corev1 "k8s.io/api/core/v1"
 )

-//OneNodeClusterLoadAssignment one envoy node endpoints
+// OneNodeClusterLoadAssignment one envoy node endpoints
 func OneNodeClusterLoadAssignment(serviceAlias, namespace string, endpoints []*corev1.Endpoints, services []*corev1.Service) (clusterLoadAssignment []types.Resource) {
     for i := range services {
         if domain, ok := services[i].Annotations["domain"]; ok && domain != "" {
@@ -108,9 +108,24 @@ func OneNodeClusterLoadAssignment(serviceAlias, namespace string, endpoints []*c
         }
         for _, p := range service.Spec.Ports {
             clusterName := fmt.Sprintf("%s_%s_%s_%d", namespace, serviceAlias, destServiceAlias, p.Port)
+            var (
+                newlendpoints []*endpoint.LocalityLbEndpoints
+                epPort        uint32
+            )
+            for _, ep := range lendpoints {
+                if len(ep.LbEndpoints) > 0 {
+                    epPort = ep.LbEndpoints[0].GetEndpoint().GetAddress().GetSocketAddress().GetPortValue()
+                }
+                if int32(epPort) != p.Port {
+                    logrus.Debugf("endpoints port [%v] different service port [%v]", epPort, p.Port)
+                    continue
+                }
+                newlendpoints = append(newlendpoints, ep)
+            }
+
             cla := &v2.ClusterLoadAssignment{
                 ClusterName: clusterName,
-                Endpoints:   lendpoints,
+                Endpoints:   newlendpoints,
             }
             if err := cla.Validate(); err != nil {
                 logrus.Errorf("endpoints discover validate failure %s", err.Error())
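The new inner loop keeps only the locality endpoint groups whose port matches the service port being processed, so a ClusterLoadAssignment no longer mixes endpoints from other ports. Note it inspects only the first LbEndpoint of each group, so it assumes a group carries a single port. A dependency-free sketch of the same filter (plain structs stand in for the envoy v2 types):

    package main

    import "fmt"

    // lbGroup stands in for endpoint.LocalityLbEndpoints; ports stands in for
    // the GetEndpoint().GetAddress().GetSocketAddress().GetPortValue() chain.
    type lbGroup struct{ ports []uint32 }

    func filterByPort(groups []lbGroup, servicePort int32) []lbGroup {
        var kept []lbGroup
        var epPort uint32
        for _, g := range groups {
            if len(g.ports) > 0 {
                epPort = g.ports[0] // only the first endpoint's port is checked
            }
            if int32(epPort) != servicePort {
                continue // dropped; mirrors the logrus.Debugf branch above
            }
            kept = append(kept, g)
        }
        return kept
    }

    func main() {
        groups := []lbGroup{{ports: []uint32{8080}}, {ports: []uint32{9090}}}
        fmt.Println(filterByPort(groups, 8080)) // keeps only the 8080 group
    }
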
diff --git a/node/nodem/envoy/server_v2.go b/node/nodem/envoy/server_v2.go
index 55eca902e..dc1f14a3c 100644
--- a/node/nodem/envoy/server_v2.go
+++ b/node/nodem/envoy/server_v2.go
@@ -48,7 +48,7 @@ import (
     kcache "k8s.io/client-go/tools/cache"
 )

-//DiscoverServerManager envoy discover server
+// DiscoverServerManager envoy discover server
 type DiscoverServerManager struct {
     server server.Server
     conf   option.Conf
@@ -78,7 +78,7 @@ func (h Hasher) ID(node *envoy_api_v2_core.Node) string {
     return node.Cluster
 }

-//NodeConfig envoy node config cache struct
+// NodeConfig envoy node config cache struct
 type NodeConfig struct {
     nodeID    string
     namespace string
@@ -90,13 +90,13 @@ type NodeConfig struct {
     listeners, clusters, endpoints []types.Resource
 }

-//GetID get envoy node config id
+// GetID get envoy node config id
 func (n *NodeConfig) GetID() string {
     return n.nodeID
 }

-//TryUpdate try update resources, if don't care about,direct return false
-//if return true, snapshot need update
+// TryUpdate try update resources, if don't care about,direct return false
+// if return true, snapshot need update
 func (n *NodeConfig) TryUpdate(obj interface{}) (needUpdate bool) {
     if service, ok := obj.(*corev1.Service); ok {
         if v, ok := service.Labels["creator"]; !ok || v != "Rainbond" {
@@ -117,13 +117,13 @@ func (n *NodeConfig) TryUpdate(obj interface{}) (needUpdate bool) {
     return false
 }

-//VersionUpdate add version index
+// VersionUpdate add version index
 func (n *NodeConfig) VersionUpdate() {
     newVersion := atomic.AddInt64(&n.version, 1)
     n.version = newVersion
 }

-//GetVersion get version
+// GetVersion get version
 func (n *NodeConfig) GetVersion() string {
     return fmt.Sprintf("version_%d", n.version)
 }
@@ -137,7 +137,7 @@ type cacheHandler struct {
     handler *ChainHandler
 }

-//GetServicesAndEndpoints get service and endpoint
+// GetServicesAndEndpoints get service and endpoint
 func (d *DiscoverServerManager) GetServicesAndEndpoints(namespace string, labelSelector labels.Selector) (ret []*corev1.Service, eret []*corev1.Endpoints) {
     kcache.ListAllByNamespace(d.services.informer.GetIndexer(), namespace, labelSelector, func(s interface{}) {
         ret = append(ret, s.(*corev1.Service))
@@ -148,7 +148,7 @@ func (d *DiscoverServerManager) GetServicesAndEndpoints(namespace string, labelS
     return
 }

-//NewNodeConfig new NodeConfig
+// NewNodeConfig new NodeConfig
 func (d *DiscoverServerManager) NewNodeConfig(config *corev1.ConfigMap) (*NodeConfig, error) {
     logrus.Debugf("cm name: %s; plugin-config: %s", config.GetName(), config.Data["plugin-config"])
     servicaAlias := config.Labels["service_alias"]
@@ -169,7 +169,7 @@ func (d *DiscoverServerManager) NewNodeConfig(config *corev1.ConfigMap) (*NodeCo
     return nc, nil
 }

-//UpdateNodeConfig update node config
+// UpdateNodeConfig update node config
 func (d *DiscoverServerManager) UpdateNodeConfig(nc *NodeConfig) error {
     var services []*corev1.Service
     var endpoint []*corev1.Endpoints
@@ -256,7 +256,7 @@ func (d *DiscoverServerManager) setSnapshot(nc *NodeConfig) error {
     return nil
 }

-//CreateDiscoverServerManager create discover server manager
+// CreateDiscoverServerManager create discover server manager
 func CreateDiscoverServerManager(clientset kubernetes.Interface, conf option.Conf) (*DiscoverServerManager, error) {
     configcache := cache.NewSnapshotCache(false, Hasher{}, logrus.WithField("module", "config-cache"))
     ctx, cancel := context.WithCancel(context.Background())
@@ -292,7 +292,7 @@ func CreateDiscoverServerManager(clientset kubernetes.Interface, conf option.Con

 const grpcMaxConcurrentStreams = 1000000

-//Start server start
+// Start server start
 func (d *DiscoverServerManager) Start(errch chan error) error {
     go func() {
         go d.queue.Run(d.ctx.Done())
@@ -331,7 +331,7 @@ func (d *DiscoverServerManager) Start(errch chan error) error {
     return nil
 }

-//Stop stop grpc server
+// Stop stop grpc server
 func (d *DiscoverServerManager) Stop() {
     //d.grpcServer.GracefulStop()
     d.cancel()
@@ -387,7 +387,7 @@ func (d *DiscoverServerManager) createEDSCacheHandler(informer kcache.SharedInde
     return cacheHandler{informer: informer, handler: handler}
 }

-//AddNodeConfig add node config cache
+// AddNodeConfig add node config cache
 func (d *DiscoverServerManager) AddNodeConfig(nc *NodeConfig) {
     var exist bool
     for i, existNC := range d.cacheNodeConfig {
@@ -406,7 +406,7 @@ func (d *DiscoverServerManager) AddNodeConfig(nc *NodeConfig) {
     }
 }

-//DeleteNodeConfig delete node config cache
+// DeleteNodeConfig delete node config cache
 func (d *DiscoverServerManager) DeleteNodeConfig(nodeID string) {
     for i, existNC := range d.cacheNodeConfig {
         if existNC.nodeID == nodeID {
diff --git a/pkg/helm/helm.go b/pkg/helm/helm.go
index eb0d27eb2..c96c497e1 100644
--- a/pkg/helm/helm.go
+++ b/pkg/helm/helm.go
@@ -194,7 +194,11 @@ func (h *Helm) install(name, chart, version string, overrides []string, dryRun b
     if err != nil {
         return nil, err
     }
-
+    var crdYaml string
+    crds := chartRequested.CRDObjects()
+    for _, crd := range crds {
+        crdYaml += string(crd.File.Data)
+    }
     if err := checkIfInstallable(chartRequested); err != nil {
         return nil, err
     }
@@ -231,8 +235,9 @@ func (h *Helm) install(name, chart, version string, overrides []string, dryRun b
             }
         }
     }
-
-    return client.Run(chartRequested, vals)
+    rel, err := client.Run(chartRequested, vals)
+    rel.Manifest = strings.TrimPrefix(crdYaml+"\n"+rel.Manifest, "\n")
+    return rel, err
 }

 func (h *Helm) parseOverrides(overrides []string) (map[string]interface{}, error) {
diff --git a/worker/appm/f/function.go b/worker/appm/f/function.go
index 5b63a7861..1aa4733ec 100644
--- a/worker/appm/f/function.go
+++ b/worker/appm/f/function.go
@@ -191,7 +191,12 @@ func ensureService(new *corev1.Service, clientSet kubernetes.Interface) error {
         return err
     }
     updateService := old.DeepCopy()
+    var clusterIP string
+    if updateService.Spec.ClusterIP != "" {
+        clusterIP = updateService.Spec.ClusterIP
+    }
     updateService.Spec = new.Spec
+    updateService.Spec.ClusterIP = clusterIP
     updateService.Labels = new.Labels
     updateService.Annotations = new.Annotations
     return persistUpdate(updateService, clientSet)
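The ensureService hunk closes out the patch: copying the desired Spec wholesale would also overwrite Spec.ClusterIP, which the API server treats as immutable, so the update would be rejected with a "field is immutable" error. Restoring the already-allocated ClusterIP keeps persistUpdate working. A runnable sketch of that merge (the helper name is hypothetical; the body mirrors the hunk):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // mergeServiceSpec is a hypothetical helper mirroring ensureService:
    // take the desired Spec, but keep the old Service's allocated ClusterIP.
    func mergeServiceSpec(oldSvc, desired *corev1.Service) *corev1.Service {
        update := oldSvc.DeepCopy()
        clusterIP := update.Spec.ClusterIP
        update.Spec = desired.Spec
        update.Spec.ClusterIP = clusterIP
        update.Labels = desired.Labels
        update.Annotations = desired.Annotations
        return update
    }

    func main() {
        oldSvc := &corev1.Service{Spec: corev1.ServiceSpec{ClusterIP: "10.96.0.10"}}
        desired := &corev1.Service{Spec: corev1.ServiceSpec{ClusterIP: ""}}
        fmt.Println(mergeServiceSpec(oldSvc, desired).Spec.ClusterIP) // 10.96.0.10
    }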