Merge pull request #1557 from goodrain/fix-V5.11

fix: some product problems
This commit is contained in:
yangkaa 2023-02-17 14:39:00 +08:00 committed by GitHub
commit 4668fb2812
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 105 additions and 43 deletions

View File

@@ -28,6 +28,8 @@ import (
const (
// NodeRolesLabelPrefix -
NodeRolesLabelPrefix = "node-role.kubernetes.io"
+ // NodeRolesLabel -
+ NodeRolesLabel = "kubernetes.io/role"
// NodeInternalIP -
NodeInternalIP = "InternalIP"
// NodeExternalIP -
@@ -151,12 +153,16 @@ func (n *nodesHandle) HandleNodeInfo(node v1.Node) (nodeinfo model.NodeInfo, err
}
// get node roles
var roles []string
- for k := range node.Labels {
+ for k, v := range node.Labels {
if strings.HasPrefix(k, NodeRolesLabelPrefix) {
// string handle : node-role.kubernetes.io/worker: "true"
role := strings.Split(k, "/")[1]
roles = append(roles, role)
}
+ if strings.HasPrefix(k, NodeRolesLabel) {
+ 	// string handle : kubernetes.io/role: master
+ 	roles = append(roles, v)
+ }
continue
}
// req resource from Prometheus
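
Aside (not part of the diff): the change above accepts both role-label conventions clusters use, the node-role.kubernetes.io/<role> key form and the older kubernetes.io/role: <role> value form. A minimal standalone sketch of the combined logic, with the handler plumbing elided (rolesFromLabels is a name introduced here for illustration):

    package main

    import (
    	"fmt"
    	"strings"
    )

    const (
    	NodeRolesLabelPrefix = "node-role.kubernetes.io"
    	NodeRolesLabel       = "kubernetes.io/role"
    )

    // rolesFromLabels mirrors the loop in HandleNodeInfo.
    func rolesFromLabels(labels map[string]string) []string {
    	var roles []string
    	for k, v := range labels {
    		if strings.HasPrefix(k, NodeRolesLabelPrefix) {
    			// node-role.kubernetes.io/worker: "true" -> "worker"
    			if parts := strings.SplitN(k, "/", 2); len(parts) == 2 {
    				roles = append(roles, parts[1])
    			}
    		}
    		if strings.HasPrefix(k, NodeRolesLabel) {
    			// kubernetes.io/role: master -> "master"
    			roles = append(roles, v)
    		}
    	}
    	return roles
    }

    func main() {
    	fmt.Println(rolesFromLabels(map[string]string{
    		"node-role.kubernetes.io/master": "",
    		"kubernetes.io/role":             "master",
    	}))
    }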

View File

@@ -17,11 +17,12 @@ import (
yamlt "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/restmapper"
"strings"
)
// AddAppK8SResource -
func (c *clusterAction) AddAppK8SResource(ctx context.Context, namespace string, appID string, resourceYaml string) ([]*dbmodel.K8sResource, *util.APIHandleError) {
- resourceObjects := c.HandleResourceYaml([]byte(resourceYaml), namespace, "create", "", map[string]string{"app_id": appID})
+ resourceObjects := c.HandleResourceYaml([]byte(strings.TrimPrefix(resourceYaml, "\n")), namespace, "create", "", map[string]string{"app_id": appID})
var resourceList []*dbmodel.K8sResource
for _, resourceObject := range resourceObjects {
resource := resourceObject
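
Aside (not part of the diff): the TrimPrefix calls in this and the next file strip a single leading newline from user-supplied YAML before parsing; content pasted through the UI often arrives with a leading blank line that the handler otherwise treats as an (empty) first document. A minimal sketch of the normalization in front of the decoder this file already imports as yamlt; the sample manifest is illustrative:

    package main

    import (
    	"bytes"
    	"fmt"
    	"strings"

    	yamlt "k8s.io/apimachinery/pkg/util/yaml"
    )

    func main() {
    	// Pasted YAML frequently starts with a blank line; trim it before decoding.
    	raw := "\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n"
    	clean := strings.TrimPrefix(raw, "\n")

    	dec := yamlt.NewYAMLOrJSONDecoder(bytes.NewReader([]byte(clean)), 1024)
    	var obj map[string]interface{}
    	if err := dec.Decode(&obj); err != nil {
    		fmt.Println("decode error:", err)
    		return
    	}
    	fmt.Println(obj["kind"]) // ConfigMap
    }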

View File

@@ -357,11 +357,12 @@ func (c *clusterAction) YamlToResource(yamlResource api_model.YamlResource, yaml
var fileBuildResourceList []api_model.K8sResourceObject
for _, yamlFilePath := range yamlFilesPath {
var fileName string
- yamlFileBytes := []byte(yamlContent)
+ yamlFileBytes := []byte(strings.TrimPrefix(yamlContent, "\n"))
if yamlSource == api_model.YamlSourceFile {
fileName = path.Base(yamlFilePath)
var err error
yamlFileBytes, err = ioutil.ReadFile(yamlFilePath)
+ yamlFileBytes = []byte(strings.TrimPrefix(string(yamlFileBytes), "\n"))
if err != nil {
logrus.Errorf("%v", err)
fileBuildResourceList = append(fileBuildResourceList, api_model.K8sResourceObject{

View File

@@ -82,6 +82,7 @@ type Response struct {
//Request build input
type Request struct {
KanikoImage string
+ InsecureBuild bool
RbdNamespace string
GRDataPVCName string
CachePVCName string

View File

@@ -142,7 +142,7 @@ func (s *slugBuild) buildRunnerImage(slugPackage string) (string, error) {
return "", fmt.Errorf("pull image %s: %v", builder.RUNNERIMAGENAME, err)
}
logrus.Infof("pull image %s successfully.", builder.RUNNERIMAGENAME)
- err := sources.ImageBuild(cacheDir, s.re.RbdNamespace, s.re.ServiceID, s.re.DeployVersion, s.re.Logger, "run-build", "", s.re.KanikoImage)
+ err := sources.ImageBuild(cacheDir, s.re.RbdNamespace, s.re.ServiceID, s.re.DeployVersion, s.re.Logger, "run-build", "", s.re.KanikoImage, s.re.InsecureBuild)
if err != nil {
s.re.Logger.Error(fmt.Sprintf("build image %s of new version failure", imageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("build image error: %s", err.Error())

View File

@@ -114,7 +114,7 @@ func (d *dockerfileBuild) runBuildJob(re *Request, buildImageName string) error
Image: re.KanikoImage,
Stdin: true,
StdinOnce: true,
- Args: []string{fmt.Sprintf("--context=%v", re.SourceDir), fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify"},
+ Args: []string{fmt.Sprintf("--context=%v", re.SourceDir), fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify", fmt.Sprintf("--insecure-pull=%v", re.InsecureBuild), fmt.Sprintf("--insecure=%v", re.InsecureBuild)},
}
container.VolumeMounts = mounts
podSpec.Containers = append(podSpec.Containers, container)
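
Aside (not part of the diff): InsecureBuild now reaches every kaniko invocation. Kaniko's --insecure and --insecure-pull flags switch pushing and pulling to plain-HTTP registries, while --skip-tls-verify keeps tolerating self-signed HTTPS. A sketch of the argument assembly shared by these call sites (kanikoArgs and the destination image are illustrative):

    package main

    import "fmt"

    // kanikoArgs assembles the executor arguments the way the diff does.
    func kanikoArgs(contextDir, destination string, insecure bool) []string {
    	return []string{
    		fmt.Sprintf("--context=%v", contextDir),
    		fmt.Sprintf("--destination=%s", destination),
    		"--skip-tls-verify",                         // tolerate self-signed HTTPS
    		fmt.Sprintf("--insecure-pull=%v", insecure), // pull over HTTP when true
    		fmt.Sprintf("--insecure=%v", insecure),      // push over HTTP when true
    	}
    }

    func main() {
    	fmt.Println(kanikoArgs("/cache/build", "goodrain.me/app:v1", true))
    }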

View File

@@ -31,14 +31,14 @@ import (
)
var dockerfileTmpl = `
- FROM microsoft/dotnet:${DOTNET_SDK_VERSION:2.2-sdk-alpine} AS builder
+ FROM mcr.microsoft.com/dotnet/sdk:${DOTNET_SDK_VERSION:2.1-alpine} AS builder
WORKDIR /app
# copy csproj and restore as distinct layers
COPY . .
RUN ${DOTNET_RESTORE_PRE} && ${DOTNET_RESTORE:dotnet restore} && dotnet publish -c Release -o /out
- FROM microsoft/dotnet:${DOTNET_RUNTIME_VERSION:2.2-aspnetcore-runtime-alpine}
+ FROM mcr.microsoft.com/dotnet/aspnet:${DOTNET_RUNTIME_VERSION:2.1-alpine}
WORKDIR /app
COPY --from=builder /out/ .
CMD ["dotnet"]
@@ -71,7 +71,7 @@ func (d *netcoreBuild) Build(re *Request) (*Response, error) {
return nil, fmt.Errorf("write default dockerfile error:%s", err.Error())
}
// build image
- err := sources.ImageBuild(d.sourceDir, re.RbdNamespace, re.ServiceID, re.DeployVersion, re.Logger, "run-build", "", re.KanikoImage)
+ err := sources.ImageBuild(d.sourceDir, re.RbdNamespace, re.ServiceID, re.DeployVersion, re.Logger, "nc-build", "", re.KanikoImage, re.InsecureBuild)
if err != nil {
re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("build image error: %s", err.Error())

View File

@@ -61,6 +61,7 @@ type SourceCodeBuildItem struct {
TGZDir string `json:"tgz_dir"`
ImageClient sources.ImageClient
KanikoImage string
+ InsecureBuild bool
KubeClient kubernetes.Interface
RbdNamespace string
RbdRepoName string
@@ -303,6 +304,7 @@ func (i *SourceCodeBuildItem) codeBuild() (*build.Response, error) {
return nil, err
}
buildReq := &build.Request{
+ InsecureBuild: i.InsecureBuild,
KanikoImage: i.KanikoImage,
RbdNamespace: i.RbdNamespace,
SourceDir: i.RepoInfo.GetCodeBuildAbsPath(),

View File

@@ -118,6 +118,7 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
logrus.Infof("The maximum number of concurrent build tasks supported by the current node is %d", maxConcurrentTask)
return &exectorManager{
KanikoImage: conf.KanikoImage,
+ InsecureBuild: conf.InsecureBuild,
KubeClient: kubeClient,
EtcdCli: etcdCli,
mqClient: mqc,
@@ -132,6 +133,7 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
type exectorManager struct {
KanikoImage string
+ InsecureBuild bool
KubeClient kubernetes.Interface
EtcdCli *clientv3.Client
tasks chan *pb.TaskMessage
@@ -334,6 +336,7 @@ func (e *exectorManager) buildFromSourceCode(task *pb.TaskMessage) {
i := NewSouceCodeBuildItem(task.TaskBody)
i.ImageClient = e.imageClient
i.KanikoImage = e.KanikoImage
+ i.InsecureBuild = e.InsecureBuild
i.KubeClient = e.KubeClient
i.RbdNamespace = e.cfg.RbdNamespace
i.RbdRepoName = e.cfg.RbdRepoName
@@ -452,15 +455,15 @@ func (e *exectorManager) sendAction(tenantID, serviceID, eventID, newVersion, ac
case "upgrade":
//add upgrade event
event := &dbmodel.ServiceEvent{
- EventID: util.NewUUID(),
- TenantID: tenantID,
- ServiceID: serviceID,
- StartTime: time.Now().Format(time.RFC3339),
- OptType: "upgrade",
- Target: "service",
- TargetID: serviceID,
- UserName: "",
- SynType: dbmodel.ASYNEVENTTYPE,
+ EventID:   util.NewUUID(),
+ TenantID:  tenantID,
+ ServiceID: serviceID,
+ StartTime: time.Now().Format(time.RFC3339),
+ OptType:   "upgrade",
+ Target:    "service",
+ TargetID:  serviceID,
+ UserName:  "",
+ SynType:   dbmodel.ASYNEVENTTYPE,
}
if err := db.GetManager().ServiceEventDao().AddModel(event); err != nil {
logrus.Errorf("create upgrade event failure %s, service %s do not auto upgrade", err.Error(), serviceID)

View File

@@ -105,7 +105,7 @@ func (e *exectorManager) runD(t *model.BuildPluginTaskBody, logger event.Logger)
n1 := strings.Split(mm[len(mm)-1], ".")[0]
buildImageName := fmt.Sprintf(builder.REGISTRYDOMAIN+"/plugin_%s_%s:%s", n1, t.PluginID, t.DeployVersion)
logger.Info("start build image", map[string]string{"step": "builder-exector"})
- err := sources.ImageBuild(sourceDir, "rbd-system", t.PluginID, t.DeployVersion, logger, "plug-build", buildImageName, e.KanikoImage)
+ err := sources.ImageBuild(sourceDir, "rbd-system", t.PluginID, t.DeployVersion, logger, "plug-build", buildImageName, e.KanikoImage, e.InsecureBuild)
if err != nil {
logger.Error(fmt.Sprintf("build image %s failure,find log in rbd-chaos", buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("[plugin]build image error: %s", err.Error())

View File

@@ -390,7 +390,7 @@ func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
}
//ImageBuild use kaniko build image
- func ImageBuild(contextDir, RbdNamespace, ServiceID, DeployVersion string, logger event.Logger, buildType, plugImageName, KanikoImage string) error {
+ func ImageBuild(contextDir, RbdNamespace, ServiceID, DeployVersion string, logger event.Logger, buildType, plugImageName, KanikoImage string, InsecureBuild bool) error {
// create image name
var buildImageName string
if buildType == "plug-build" {
@@ -430,7 +430,7 @@ func ImageBuild(contextDir, RbdNamespace, ServiceID, DeployVersion string, logge
Image: KanikoImage,
Stdin: true,
StdinOnce: true,
Args: []string{"--context=dir:///workspace", fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify"},
Args: []string{"--context=dir:///workspace", fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify", fmt.Sprintf("--insecure-pull=%v", InsecureBuild), fmt.Sprintf("--insecure=%v", InsecureBuild)},
}
container.VolumeMounts = volumeMounts
podSpec.Containers = append(podSpec.Containers, container)
@@ -703,6 +703,7 @@ func printLog(logger event.Logger, level, msg string, info map[string]string) {
}
}
+ // CreateImageName -
func CreateImageName(ServiceID, DeployVersion string) string {
imageName := strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, ServiceID, DeployVersion))
logrus.Info("imageName:", imageName)
@@ -725,6 +726,7 @@ func CreateImageName(ServiceID, DeployVersion string) string {
return strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, workloadName, DeployVersion))
}
+ // CreateVolumesAndMounts -
func CreateVolumesAndMounts(contextDir, buildType string) (volumes []corev1.Volume, volumeMounts []corev1.VolumeMount) {
pathSplit := strings.Split(contextDir, "/")
subPath := strings.Join(pathSplit[2:], "/")
@@ -784,6 +786,24 @@ func CreateVolumesAndMounts(contextDir, buildType string) (volumes []corev1.Volu
}
volumeMounts = append(volumeMounts, volumeMount)
}
if buildType == "nc-build" {
volume := corev1.Volume{
Name: "nc-build",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/cache",
Type: &hostPathType,
},
},
}
volumes = append(volumes, volume)
volumeMount := corev1.VolumeMount{
Name: "nc-build",
MountPath: "/workspace",
SubPath: subPath,
}
volumeMounts = append(volumeMounts, volumeMount)
}
if buildType == "run-build" {
volume := corev1.Volume{
Name: "run-build",
@@ -804,6 +824,7 @@ func CreateVolumesAndMounts(contextDir, buildType string) (volumes []corev1.Volu
return volumes, volumeMounts
}
+ // WaitingComplete -
func WaitingComplete(reChan *channels.RingChannel) (err error) {
var logComplete = false
var jobComplete = false

View File

@@ -39,6 +39,7 @@ type Config struct {
ClusterName string
MysqlConnectionInfo string
KanikoImage string
+ InsecureBuild bool
DBType string
PrometheusMetricPath string
EventLogServers []string
@@ -105,6 +106,7 @@ func (a *Builder) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&a.CachePath, "cache-path", "/cache", "volume cache mount path, when cache-mode using hostpath, default path is /cache")
fs.StringVar(&a.ContainerRuntime, "container-runtime", sources.ContainerRuntimeContainerd, "container runtime, support docker and containerd")
fs.StringVar(&a.RuntimeEndpoint, "runtime-endpoint", sources.RuntimeEndpointContainerd, "container runtime endpoint")
+ fs.BoolVar(&a.InsecureBuild, "insecure-build", false, "kaniko build image policy http or https, true is http")
}
//SetLog set log
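
Aside (not part of the diff): the new flag defaults to false, i.e. HTTPS registries; --insecure-build=true makes kaniko use plain HTTP for pulls and pushes. A minimal sketch of declaring and reading such a flag with spf13/pflag (the FlagSet here is illustrative; the real one belongs to the builder's option handling above):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	var insecureBuild bool
    	fs := pflag.NewFlagSet("builder", pflag.ContinueOnError)
    	fs.BoolVar(&insecureBuild, "insecure-build", false,
    		"kaniko build image policy http or https, true is http")
    	if err := fs.Parse([]string{"--insecure-build=true"}); err != nil {
    		fmt.Println("parse error:", err)
    		return
    	}
    	fmt.Println(insecureBuild) // true
    }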

View File

@@ -35,7 +35,7 @@ import (
corev1 "k8s.io/api/core/v1"
)
- //OneNodeCluster conver cluster of on envoy node
+ // OneNodeCluster conver cluster of on envoy node
func OneNodeCluster(serviceAlias, namespace string, configs *corev1.ConfigMap, services []*corev1.Service) ([]types.Resource, error) {
resources, _, err := GetPluginConfigs(configs)
if err != nil {
@@ -147,8 +147,8 @@ func transportSocket(name, domain string) *core.TransportSocket {
}
}
- //downstreamClusters handle app self cluster
- //only local port
+ // downstreamClusters handle app self cluster
+ // only local port
func downstreamClusters(serviceAlias, namespace string, ports []*api_model.BasePort) (cdsClusters []*v2.Cluster) {
for i := range ports {
port := ports[i]

View File

@@ -31,7 +31,7 @@ import (
corev1 "k8s.io/api/core/v1"
)
- //OneNodeClusterLoadAssignment one envoy node endpoints
+ // OneNodeClusterLoadAssignment one envoy node endpoints
func OneNodeClusterLoadAssignment(serviceAlias, namespace string, endpoints []*corev1.Endpoints, services []*corev1.Service) (clusterLoadAssignment []types.Resource) {
for i := range services {
if domain, ok := services[i].Annotations["domain"]; ok && domain != "" {
@@ -108,9 +108,24 @@ func OneNodeClusterLoadAssignment(serviceAlias, namespace string, endpoints []*c
}
for _, p := range service.Spec.Ports {
clusterName := fmt.Sprintf("%s_%s_%s_%d", namespace, serviceAlias, destServiceAlias, p.Port)
+ var (
+ 	newlendpoints []*endpoint.LocalityLbEndpoints
+ 	epPort uint32
+ )
+ for _, ep := range lendpoints {
+ 	if len(ep.LbEndpoints) > 0 {
+ 		epPort = ep.LbEndpoints[0].GetEndpoint().GetAddress().GetSocketAddress().GetPortValue()
+ 	}
+ 	if int32(epPort) != p.Port {
+ 		logrus.Debugf("endpoints port [%v] different service port [%v]", epPort, p.Port)
+ 		continue
+ 	}
+ 	newlendpoints = append(newlendpoints, ep)
+ }
cla := &v2.ClusterLoadAssignment{
ClusterName: clusterName,
- Endpoints: lendpoints,
+ Endpoints: newlendpoints,
}
if err := cla.Validate(); err != nil {
logrus.Errorf("endpoints discover validate failure %s", err.Error())

View File

@@ -48,7 +48,7 @@ import (
kcache "k8s.io/client-go/tools/cache"
)
- //DiscoverServerManager envoy discover server
+ // DiscoverServerManager envoy discover server
type DiscoverServerManager struct {
server server.Server
conf option.Conf
@@ -78,7 +78,7 @@ func (h Hasher) ID(node *envoy_api_v2_core.Node) string {
return node.Cluster
}
- //NodeConfig envoy node config cache struct
+ // NodeConfig envoy node config cache struct
type NodeConfig struct {
nodeID string
namespace string
@@ -90,13 +90,13 @@ type NodeConfig struct {
listeners, clusters, endpoints []types.Resource
}
- //GetID get envoy node config id
+ // GetID get envoy node config id
func (n *NodeConfig) GetID() string {
return n.nodeID
}
- //TryUpdate try update resources, if don't care about,direct return false
- //if return true, snapshot need update
+ // TryUpdate try update resources, if don't care about,direct return false
+ // if return true, snapshot need update
func (n *NodeConfig) TryUpdate(obj interface{}) (needUpdate bool) {
if service, ok := obj.(*corev1.Service); ok {
if v, ok := service.Labels["creator"]; !ok || v != "Rainbond" {
@@ -117,13 +117,13 @@ func (n *NodeConfig) TryUpdate(obj interface{}) (needUpdate bool) {
return false
}
- //VersionUpdate add version index
+ // VersionUpdate add version index
func (n *NodeConfig) VersionUpdate() {
newVersion := atomic.AddInt64(&n.version, 1)
n.version = newVersion
}
- //GetVersion get version
+ // GetVersion get version
func (n *NodeConfig) GetVersion() string {
return fmt.Sprintf("version_%d", n.version)
}
@@ -137,7 +137,7 @@ type cacheHandler struct {
handler *ChainHandler
}
- //GetServicesAndEndpoints get service and endpoint
+ // GetServicesAndEndpoints get service and endpoint
func (d *DiscoverServerManager) GetServicesAndEndpoints(namespace string, labelSelector labels.Selector) (ret []*corev1.Service, eret []*corev1.Endpoints) {
kcache.ListAllByNamespace(d.services.informer.GetIndexer(), namespace, labelSelector, func(s interface{}) {
ret = append(ret, s.(*corev1.Service))
@@ -148,7 +148,7 @@ func (d *DiscoverServerManager) GetServicesAndEndpoints(namespace string, labelS
return
}
- //NewNodeConfig new NodeConfig
+ // NewNodeConfig new NodeConfig
func (d *DiscoverServerManager) NewNodeConfig(config *corev1.ConfigMap) (*NodeConfig, error) {
logrus.Debugf("cm name: %s; plugin-config: %s", config.GetName(), config.Data["plugin-config"])
servicaAlias := config.Labels["service_alias"]
@@ -169,7 +169,7 @@ func (d *DiscoverServerManager) NewNodeConfig(config *corev1.ConfigMap) (*NodeCo
return nc, nil
}
- //UpdateNodeConfig update node config
+ // UpdateNodeConfig update node config
func (d *DiscoverServerManager) UpdateNodeConfig(nc *NodeConfig) error {
var services []*corev1.Service
var endpoint []*corev1.Endpoints
@@ -256,7 +256,7 @@ func (d *DiscoverServerManager) setSnapshot(nc *NodeConfig) error {
return nil
}
- //CreateDiscoverServerManager create discover server manager
+ // CreateDiscoverServerManager create discover server manager
func CreateDiscoverServerManager(clientset kubernetes.Interface, conf option.Conf) (*DiscoverServerManager, error) {
configcache := cache.NewSnapshotCache(false, Hasher{}, logrus.WithField("module", "config-cache"))
ctx, cancel := context.WithCancel(context.Background())
@@ -292,7 +292,7 @@ func CreateDiscoverServerManager(clientset kubernetes.Interface, conf option.Con
const grpcMaxConcurrentStreams = 1000000
- //Start server start
+ // Start server start
func (d *DiscoverServerManager) Start(errch chan error) error {
go func() {
go d.queue.Run(d.ctx.Done())
@@ -331,7 +331,7 @@ func (d *DiscoverServerManager) Start(errch chan error) error {
return nil
}
- //Stop stop grpc server
+ // Stop stop grpc server
func (d *DiscoverServerManager) Stop() {
//d.grpcServer.GracefulStop()
d.cancel()
@@ -387,7 +387,7 @@ func (d *DiscoverServerManager) createEDSCacheHandler(informer kcache.SharedInde
return cacheHandler{informer: informer, handler: handler}
}
- //AddNodeConfig add node config cache
+ // AddNodeConfig add node config cache
func (d *DiscoverServerManager) AddNodeConfig(nc *NodeConfig) {
var exist bool
for i, existNC := range d.cacheNodeConfig {
@@ -406,7 +406,7 @@ func (d *DiscoverServerManager) AddNodeConfig(nc *NodeConfig) {
}
}
- //DeleteNodeConfig delete node config cache
+ // DeleteNodeConfig delete node config cache
func (d *DiscoverServerManager) DeleteNodeConfig(nodeID string) {
for i, existNC := range d.cacheNodeConfig {
if existNC.nodeID == nodeID {

View File

@@ -194,7 +194,11 @@ func (h *Helm) install(name, chart, version string, overrides []string, dryRun b
if err != nil {
return nil, err
}
+ var crdYaml string
+ crds := chartRequested.CRDObjects()
+ for _, crd := range crds {
+ 	crdYaml += string(crd.File.Data)
+ }
if err := checkIfInstallable(chartRequested); err != nil {
return nil, err
}
@@ -231,8 +235,9 @@ func (h *Helm) install(name, chart, version string, overrides []string, dryRun b
}
}
}
- return client.Run(chartRequested, vals)
+ rel, err := client.Run(chartRequested, vals)
+ rel.Manifest = strings.TrimPrefix(crdYaml+"\n"+rel.Manifest, "\n")
+ return rel, err
}
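
Aside (not part of the diff): client.Run renders only templated resources; CRDs shipped in a chart's crds/ directory are installed by a separate code path and never appear in Release.Manifest, so downstream code that converts the manifest back into resources would miss them. Prepending CRDObjects() closes that gap. A sketch of the same against the helm v3 API (error handling elided; manifestWithCRDs is a name introduced here):

    package main

    import (
    	"strings"

    	"helm.sh/helm/v3/pkg/chart"
    	"helm.sh/helm/v3/pkg/release"
    )

    // manifestWithCRDs concatenates the chart's crds/ files in front of the
    // rendered manifest so later YAML handling sees them too.
    func manifestWithCRDs(ch *chart.Chart, rel *release.Release) string {
    	var crdYaml string
    	for _, crd := range ch.CRDObjects() {
    		crdYaml += string(crd.File.Data)
    	}
    	return strings.TrimPrefix(crdYaml+"\n"+rel.Manifest, "\n")
    }

    func main() {}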
func (h *Helm) parseOverrides(overrides []string) (map[string]interface{}, error) {

View File

@@ -191,7 +191,12 @@ func ensureService(new *corev1.Service, clientSet kubernetes.Interface) error {
return err
}
updateService := old.DeepCopy()
+ var clusterIP string
+ if updateService.Spec.ClusterIP != "" {
+ 	clusterIP = updateService.Spec.ClusterIP
+ }
updateService.Spec = new.Spec
+ updateService.Spec.ClusterIP = clusterIP
updateService.Labels = new.Labels
updateService.Annotations = new.Annotations
return persistUpdate(updateService, clientSet)
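
Aside (not part of the diff): Spec.ClusterIP is immutable on a live Service. Replacing the whole Spec with the desired one submits an empty ClusterIP, which the API server rejects with a "field is immutable" error; carrying the allocated IP over first avoids that. The pattern in isolation (mergeServiceSpec is a name introduced here):

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    // mergeServiceSpec overwrites a live Service's spec with the desired one
    // while preserving the immutable, cluster-allocated ClusterIP.
    func mergeServiceSpec(old, desired *corev1.Service) *corev1.Service {
    	updated := old.DeepCopy()
    	clusterIP := old.Spec.ClusterIP
    	updated.Spec = desired.Spec
    	updated.Spec.ClusterIP = clusterIP // keep the allocated IP
    	updated.Labels = desired.Labels
    	updated.Annotations = desired.Annotations
    	return updated
    }

    func main() {
    	old := &corev1.Service{Spec: corev1.ServiceSpec{ClusterIP: "10.43.0.10"}}
    	desired := &corev1.Service{Spec: corev1.ServiceSpec{ClusterIP: ""}}
    	fmt.Println(mergeServiceSpec(old, desired).Spec.ClusterIP) // 10.43.0.10
    }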