feat: containerd pull and push, kaniko build image (#1324)

Co-authored-by: 曲源成 <quyc@goodrain.com>
Quyc 2022-09-19 12:33:21 +08:00 committed by yangkaa
parent 5a26a784aa
commit ca430ec466
24 changed files with 881 additions and 42329 deletions

View File

@ -22,6 +22,7 @@ import (
"context"
"encoding/base64"
"fmt"
"github.com/containerd/containerd"
"strings"
"github.com/goodrain/rainbond/db"
@ -34,7 +35,6 @@ import (
"k8s.io/client-go/kubernetes"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
func init() {
@ -82,32 +82,32 @@ type Response struct {
//Request build input
type Request struct {
RbdNamespace string
GRDataPVCName string
CachePVCName string
CacheMode string
CachePath string
TenantID string
SourceDir string
CacheDir string
TGZDir string
RepositoryURL string
CodeSouceInfo sources.CodeSourceInfo
Branch string
ServiceAlias string
ServiceID string
DeployVersion string
Runtime string
ServerType string
Commit Commit
Lang code.Lang
BuildEnvs map[string]string
Logger event.Logger
DockerClient *client.Client
KubeClient kubernetes.Interface
ExtraHosts []string
HostAlias []HostAlias
Ctx context.Context
RbdNamespace string
GRDataPVCName string
CachePVCName string
CacheMode string
CachePath string
TenantID string
SourceDir string
CacheDir string
TGZDir string
RepositoryURL string
CodeSouceInfo sources.CodeSourceInfo
Branch string
ServiceAlias string
ServiceID string
DeployVersion string
Runtime string
ServerType string
Commit Commit
Lang code.Lang
BuildEnvs map[string]string
Logger event.Logger
ContainerdClient *containerd.Client
KubeClient kubernetes.Interface
ExtraHosts []string
HostAlias []HostAlias
Ctx context.Context
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the

View File

@ -29,7 +29,6 @@ import (
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/eapache/channels"
"github.com/goodrain/rainbond/builder"
jobc "github.com/goodrain/rainbond/builder/job"
@ -111,6 +110,7 @@ func (s *slugBuild) writeRunDockerfile(sourceDir, packageName string, envs map[s
ENV CODE_COMMIT_MESSAGE=${CODE_COMMIT_MESSAGE}
ENV VERSION=%s
`
logrus.Infof("cacheDir:%v, from:%v, packageName:%v, Dir(slugPackage):%v", sourceDir, builder.RUNNERIMAGENAME, packageName, path.Dir(packageName))
result := util.ParseVariable(fmt.Sprintf(runDockerfile, builder.RUNNERIMAGENAME, packageName, s.re.DeployVersion), envs)
return ioutil.WriteFile(path.Join(sourceDir, "Dockerfile"), []byte(result), 0755)
}
@ -137,51 +137,16 @@ func (s *slugBuild) buildRunnerImage(slugPackage string) (string, error) {
return "", fmt.Errorf("write default runtime dockerfile error:%s", err.Error())
}
//build runtime image
runbuildOptions := types.ImageBuildOptions{
BuildArgs: map[string]*string{
"CODE_COMMIT_HASH": &s.re.Commit.Hash,
"CODE_COMMIT_USER": &s.re.Commit.User,
"CODE_COMMIT_MESSAGE": &s.re.Commit.Message,
},
Tags: []string{imageName},
Remove: true,
NetworkMode: ImageBuildNetworkModeHost,
AuthConfigs: GetTenantRegistryAuthSecrets(s.re.Ctx, s.re.TenantID, s.re.KubeClient),
}
if _, ok := s.re.BuildEnvs["NO_CACHE"]; ok {
runbuildOptions.NoCache = true
} else {
runbuildOptions.NoCache = false
}
// pull image runner
if err := sources.ImagesPullAndPush(builder.RUNNERIMAGENAME, builder.ONLINERUNNERIMAGENAME, "", "", s.re.Logger); err != nil {
return "", fmt.Errorf("pull image %s: %v", builder.RUNNERIMAGENAME, err)
}
logrus.Infof("pull image %s successfully.", builder.RUNNERIMAGENAME)
err := sources.ImageBuild(s.re.DockerClient, cacheDir, runbuildOptions, s.re.Logger, 30)
err := sources.ImageBuild(cacheDir, s.re.RbdNamespace, s.re.ServiceID, s.re.DeployVersion, s.re.Logger, "run-build")
if err != nil {
s.re.Logger.Error(fmt.Sprintf("build image %s of new version failure", imageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("build image error: %s", err.Error())
return "", err
}
// check build image exist
_, err = sources.ImageInspectWithRaw(s.re.DockerClient, imageName)
if err != nil {
s.re.Logger.Error(fmt.Sprintf("build image %s of service version failure", imageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("get image inspect error: %s", err.Error())
return "", err
}
s.re.Logger.Info("build image of new version success, will push to local registry", map[string]string{"step": "builder-exector"})
err = sources.ImagePush(s.re.DockerClient, imageName, builder.REGISTRYUSER, builder.REGISTRYPASS, s.re.Logger, 10)
if err != nil {
s.re.Logger.Error("push image failure", map[string]string{"step": "builder-exector"})
logrus.Errorf("push image error: %s", err.Error())
return "", err
}
s.re.Logger.Info("push image of new version success", map[string]string{"step": "builder-exector"})
if err := sources.ImageRemove(s.re.DockerClient, imageName); err != nil {
logrus.Errorf("remove image %s failure %s", imageName, err.Error())
}
return imageName, nil
}

View File

@ -19,16 +19,17 @@
package build
import (
"context"
"fmt"
"path"
"strconv"
"strings"
"github.com/docker/docker/api/types"
"github.com/goodrain/rainbond/builder"
"github.com/eapache/channels"
jobc "github.com/goodrain/rainbond/builder/job"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/util"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"path"
"time"
)
func dockerfileBuilder() (Build, error) {
@ -48,72 +49,170 @@ func (d *dockerfileBuild) Build(re *Request) (*Response, error) {
return nil, err
}
buildImageName := CreateImageName(re.ServiceID, re.DeployVersion)
buildOptions := types.ImageBuildOptions{
Tags: []string{buildImageName},
Remove: true,
BuildArgs: GetARGs(re.BuildEnvs),
NetworkMode: ImageBuildNetworkModeHost,
AuthConfigs: GetTenantRegistryAuthSecrets(re.Ctx, re.TenantID, re.KubeClient),
if err := d.stopPreBuildJob(re); err != nil {
logrus.Errorf("stop pre build job for service %s failure %s", re.ServiceID, err.Error())
}
if _, ok := re.BuildEnvs["NO_CACHE"]; ok {
buildOptions.NoCache = true
} else {
buildOptions.NoCache = false
}
re.Logger.Info("Start build image from dockerfile", map[string]string{"step": "builder-exector"})
timeout, _ := strconv.Atoi(re.BuildEnvs["TIMOUT"])
// min 10 minutes
if timeout < 10 {
timeout = 60
}
err = sources.ImageBuild(re.DockerClient, re.SourceDir, buildOptions, re.Logger, timeout)
if err != nil {
re.Logger.Error(fmt.Sprintf("build image %s failure", buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("build image error: %s", err.Error())
if err := d.runBuildJob(re, buildImageName); err != nil {
re.Logger.Error(util.Translation("Compiling the source code failure"), map[string]string{"step": "build-code", "status": "failure"})
logrus.Error("build dockerfile job error,", err.Error())
return nil, err
}
// check image exist
_, err = sources.ImageInspectWithRaw(re.DockerClient, buildImageName)
if err != nil {
re.Logger.Error(fmt.Sprintf("Build image %s failure,view build logs", buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("get image inspect error: %s", err.Error())
return nil, err
}
re.Logger.Info("The image build is successful and starts pushing the image to the repository", map[string]string{"step": "builder-exector"})
err = sources.ImagePush(re.DockerClient, buildImageName, builder.REGISTRYUSER, builder.REGISTRYPASS, re.Logger, 20)
if err != nil {
re.Logger.Error("Push image failure", map[string]string{"step": "builder-exector"})
logrus.Errorf("push image error: %s", err.Error())
return nil, err
}
re.Logger.Info("The image is pushed to the warehouse successfully", map[string]string{"step": "builder-exector"})
if err := sources.ImageRemove(re.DockerClient, buildImageName); err != nil {
logrus.Errorf("remove image %s failure %s", buildImageName, err.Error())
}
re.Logger.Info("code build success", map[string]string{"step": "build-exector"})
return &Response{
MediumPath: buildImageName,
MediumType: ImageMediumType,
}, nil
}
//GetARGs get args and parse value
func GetARGs(buildEnvs map[string]string) map[string]*string {
args := make(map[string]*string)
argStr := make(map[string]string)
for k, v := range buildEnvs {
if strings.Replace(v, " ", "", -1) == "" {
continue
}
if ks := strings.Split(k, "ARG_"); len(ks) > 1 {
value := v
args[ks[1]] = &value
argStr[ks[1]] = value
// stopPreBuildJob keeps only one build task per component by removing any earlier build jobs
func (d *dockerfileBuild) stopPreBuildJob(re *Request) error {
jobList, err := jobc.GetJobController().GetServiceJobs(re.ServiceID)
if err != nil {
logrus.Errorf("get pre build job for service %s failure, %s", re.ServiceID, err.Error())
}
if len(jobList) > 0 {
for _, job := range jobList {
jobc.GetJobController().DeleteJob(job.Name)
}
}
return nil
}
// Use kaniko to create a job to build an image
func (d *dockerfileBuild) runBuildJob(re *Request, buildImageName string) error {
name := fmt.Sprintf("%s-%s", re.ServiceID, re.DeployVersion)
namespace := re.RbdNamespace
job := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
"service": re.ServiceID,
"job": "codebuild",
},
},
}
podSpec := corev1.PodSpec{RestartPolicy: corev1.RestartPolicyOnFailure} // only support never and onfailure
volumes, mounts := d.createVolumeAndMount(re)
podSpec.Volumes = volumes
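// The build container runs the kaniko executor: it reads the Dockerfile and sources from the
// /workspace mount (the host source dir), builds the image in-cluster without a docker daemon,
// and pushes the result directly to buildImageName; --skip-tls-verify tolerates a local
// registry with a self-signed certificate.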
container := corev1.Container{
Name: name,
Image: "yangk/executor:latest",
Stdin: true,
StdinOnce: true,
Args: []string{"--context=dir:///workspace", fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify"},
}
container.VolumeMounts = mounts
podSpec.Containers = append(podSpec.Containers, container)
job.Spec = podSpec
writer := re.Logger.GetWriter("builder", "info")
reChan := channels.NewRingChannel(10)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
logrus.Debugf("create job[name: %s; namespace: %s]", job.Name, job.Namespace)
err := jobc.GetJobController().ExecJob(ctx, &job, writer, reChan)
if err != nil {
logrus.Errorf("create new job:%s failed: %s", name, err.Error())
return err
}
re.Logger.Info(util.Translation("create build code job success"), map[string]string{"step": "build-exector"})
// delete job after complete
defer jobc.GetJobController().DeleteJob(job.Name)
return d.waitingComplete(re, reChan)
}
func (d *dockerfileBuild) createVolumeAndMount(re *Request) (volumes []corev1.Volume, volumeMounts []corev1.VolumeMount) {
hostPathType := corev1.HostPathDirectoryOrCreate
hostsFilePathType := corev1.HostPathFile
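// kaniko reads registry credentials from /kaniko/.docker/config.json, which is why the
// rbd-hub-credentials secret's .dockerconfigjson key is projected to config.json below and
// the corresponding mount targets /kaniko/.docker.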
volumes = []corev1.Volume{
{
Name: "dockerfile-build",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: re.SourceDir,
Type: &hostPathType,
},
},
},
{
Name: "kaniko-secret",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "rbd-hub-credentials",
Items: []corev1.KeyToPath{
{
Key: ".dockerconfigjson",
Path: "config.json",
},
},
},
},
},
{
Name: "hosts",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc/hosts",
Type: &hostsFilePathType,
},
},
},
}
volumeMounts = []corev1.VolumeMount{
{
Name: "dockerfile-build",
MountPath: "/workspace",
},
{
Name: "kaniko-secret",
MountPath: "/kaniko/.docker",
},
{
Name: "hosts",
MountPath: "/etc/hosts",
},
}
return volumes, volumeMounts
}
func (d *dockerfileBuild) waitingComplete(re *Request, reChan *channels.RingChannel) (err error) {
var logComplete = false
var jobComplete = false
timeout := time.NewTimer(time.Minute * 60)
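// The job status ("complete"/"failed"/"cancel") and the "logcomplete" signal arrive
// independently on the ring channel; return only after both are seen so the build log is
// fully streamed before the result is reported.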
for {
select {
case <-timeout.C:
return fmt.Errorf("build time out (more than 60 minute)")
case jobStatus := <-reChan.Out():
status := jobStatus.(string)
switch status {
case "complete":
jobComplete = true
if logComplete {
return nil
}
re.Logger.Info(util.Translation("build code job exec completed"), map[string]string{"step": "build-exector"})
case "failed":
jobComplete = true
err = fmt.Errorf("build code job exec failure")
if logComplete {
return err
}
re.Logger.Info(util.Translation("build code job exec failed"), map[string]string{"step": "build-exector"})
case "cancel":
jobComplete = true
err = fmt.Errorf("build code job is canceled")
if logComplete {
return err
}
case "logcomplete":
logComplete = true
if jobComplete {
return err
}
}
}
}
for k, arg := range args {
value := util.ParseVariable(*arg, argStr)
args[k] = &value
}
return args
}

View File

@ -24,9 +24,6 @@ import (
"os"
"path"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/util"
@ -51,7 +48,6 @@ type netcoreBuild struct {
imageName string
buildImageName string
sourceDir string
dockercli *client.Client
logger event.Logger
serviceID string
}
@ -62,7 +58,6 @@ func netcoreBuilder() (Build, error) {
func (d *netcoreBuild) Build(re *Request) (*Response, error) {
defer d.clear()
d.dockercli = re.DockerClient
d.logger = re.Logger
d.serviceID = re.ServiceID
d.sourceDir = re.SourceDir
@ -74,39 +69,14 @@ func (d *netcoreBuild) Build(re *Request) (*Response, error) {
return nil, fmt.Errorf("write default dockerfile error:%s", err.Error())
}
// build image
runbuildOptions := types.ImageBuildOptions{
Tags: []string{d.imageName},
Remove: true,
NetworkMode: ImageBuildNetworkModeHost,
AuthConfigs: GetTenantRegistryAuthSecrets(re.Ctx, re.TenantID, re.KubeClient),
}
if _, ok := re.BuildEnvs["NO_CACHE"]; ok {
runbuildOptions.NoCache = true
} else {
runbuildOptions.NoCache = false
}
err := sources.ImageBuild(re.DockerClient, d.sourceDir, runbuildOptions, re.Logger, 60)
err := sources.ImageBuild(d.sourceDir, re.RbdNamespace, re.ServiceID, re.DeployVersion, re.Logger, "run-build")
if err != nil {
re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("build image error: %s", err.Error())
return nil, err
}
// check build image exist
_, err = sources.ImageInspectWithRaw(re.DockerClient, d.imageName)
if err != nil {
re.Logger.Error(fmt.Sprintf("build image %s failure, find log in rbd-chaos", d.buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("get image inspect error: %s", err.Error())
return nil, err
}
re.Logger.Info("build image success, start to push local image registry", map[string]string{"step": "builder-exector"})
err = sources.ImagePush(re.DockerClient, d.imageName, builder.REGISTRYUSER, builder.REGISTRYPASS, re.Logger, 5)
if err != nil {
re.Logger.Error("push image to local image registry faliure, find log in rbd-chaos", map[string]string{"step": "builder-exector"})
logrus.Errorf("push image error: %s", err.Error())
return nil, err
}
re.Logger.Info("push image to push local image registry success", map[string]string{"step": "builder-exector"})
if err := sources.ImageRemove(re.DockerClient, d.imageName); err != nil {
if err := sources.ImageRemove(re.ContainerdClient, d.imageName); err != nil {
logrus.Errorf("remove image %s failure %s", d.imageName, err.Error())
}
return d.createResponse(), nil

View File

@ -20,6 +20,7 @@ package clean
import (
"context"
"github.com/containerd/containerd"
"os"
"time"
@ -34,9 +35,10 @@ import (
//Manager CleanManager
type Manager struct {
dclient *client.Client
ctx context.Context
cancel context.CancelFunc
dclient *client.Client
containerdClient *containerd.Client
ctx context.Context
cancel context.CancelFunc
}
//CreateCleanManager create clean manager
@ -87,7 +89,7 @@ func (t *Manager) Start(errchan chan error) error {
if v.DeliveredType == "image" {
imagePath := v.DeliveredPath
//remove local image, However, it is important to note that the version image is stored in the image repository
err := sources.ImageRemove(t.dclient, imagePath)
err := sources.ImageRemove(t.containerdClient, imagePath)
if err != nil {
logrus.Error(err)
}

View File

@ -20,10 +20,11 @@ package exector
import (
"fmt"
"github.com/containerd/containerd"
"github.com/docker/distribution/reference"
"os"
"time"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/build"
"github.com/goodrain/rainbond/builder/sources"
@ -35,21 +36,21 @@ import (
//ImageBuildItem ImageBuildItem
type ImageBuildItem struct {
Namespace string `json:"namespace"`
TenantName string `json:"tenant_name"`
ServiceAlias string `json:"service_alias"`
Image string `json:"image"`
DestImage string `json:"dest_image"`
Logger event.Logger `json:"logger"`
EventID string `json:"event_id"`
DockerClient *client.Client
TenantID string
ServiceID string
DeployVersion string
HubUser string
HubPassword string
Action string
Configs map[string]gjson.Result `json:"configs"`
Namespace string `json:"namespace"`
TenantName string `json:"tenant_name"`
ServiceAlias string `json:"service_alias"`
Image string `json:"image"`
DestImage string `json:"dest_image"`
Logger event.Logger `json:"logger"`
EventID string `json:"event_id"`
ContainerdClient *containerd.Client
TenantID string
ServiceID string
DeployVersion string
HubUser string
HubPassword string
Action string
Configs map[string]gjson.Result `json:"configs"`
}
// NewImageBuildItem creates an image build item
@ -75,31 +76,36 @@ func NewImageBuildItem(in []byte) *ImageBuildItem {
//Run Run
func (i *ImageBuildItem) Run(timeout time.Duration) error {
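// Pull the source image from its origin registry, retag it with the component's local
// registry name (CreateImageName), push it to the local registry, then remove the local
// copies so the node's image store does not grow unbounded.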
user, pass := builder.GetImageUserInfoV2(i.Image, i.HubUser, i.HubPassword)
_, err := sources.ImagePull(i.DockerClient, i.Image, user, pass, i.Logger, 30)
_, err := sources.ImagePull(i.ContainerdClient, i.Image, user, pass, i.Logger, 30)
if err != nil {
logrus.Errorf("pull image %s error: %s", i.Image, err.Error())
i.Logger.Error(fmt.Sprintf("获取指定镜像: %s失败", i.Image), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
rf, err := reference.ParseAnyReference(i.Image)
if err != nil {
logrus.Errorf("reference image error: %s", err.Error())
return err
}
localImageURL := build.CreateImageName(i.ServiceID, i.DeployVersion)
if err := sources.ImageTag(i.DockerClient, i.Image, localImageURL, i.Logger, 1); err != nil {
if err := sources.ImageTag(i.ContainerdClient, rf.String(), localImageURL, i.Logger, 1); err != nil {
logrus.Errorf("change image tag error: %s", err.Error())
i.Logger.Error(fmt.Sprintf("修改镜像tag: %s -> %s 失败", i.Image, localImageURL), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
err = sources.ImagePush(i.DockerClient, localImageURL, builder.REGISTRYUSER, builder.REGISTRYPASS, i.Logger, 30)
err = sources.ImagePush(i.ContainerdClient, localImageURL, builder.REGISTRYUSER, builder.REGISTRYPASS, i.Logger, 30)
if err != nil {
logrus.Errorf("push image into registry error: %s", err.Error())
i.Logger.Error("推送镜像至镜像仓库失败", map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
if err := sources.ImageRemove(i.DockerClient, localImageURL); err != nil {
if err := sources.ImageRemove(i.ContainerdClient, localImageURL); err != nil {
logrus.Errorf("remove image %s failure %s", localImageURL, err.Error())
}
if os.Getenv("DISABLE_IMAGE_CACHE") == "true" {
if err := sources.ImageRemove(i.DockerClient, i.Image); err != nil {
if err := sources.ImageRemove(i.ContainerdClient, i.Image); err != nil {
logrus.Errorf("remove image %s failure %s", i.Image, err.Error())
}
}

View File

@ -21,6 +21,7 @@ package exector
import (
"context"
"fmt"
"github.com/containerd/containerd"
"github.com/goodrain/rainbond/builder/parser"
"io/ioutil"
"os"
@ -47,34 +48,35 @@ import (
//SourceCodeBuildItem SouceCodeBuildItem
type SourceCodeBuildItem struct {
Namespace string `json:"namespace"`
TenantName string `json:"tenant_name"`
GRDataPVCName string `json:"gr_data_pvc_name"`
CachePVCName string `json:"cache_pvc_name"`
CacheMode string `json:"cache_mode"`
CachePath string `json:"cache_path"`
ServiceAlias string `json:"service_alias"`
Action string `json:"action"`
DestImage string `json:"dest_image"`
Logger event.Logger `json:"logger"`
EventID string `json:"event_id"`
CacheDir string `json:"cache_dir"`
TGZDir string `json:"tgz_dir"`
DockerClient *client.Client
KubeClient kubernetes.Interface
RbdNamespace string
RbdRepoName string
TenantID string
ServiceID string
DeployVersion string
Lang string
Runtime string
BuildEnvs map[string]string
CodeSouceInfo sources.CodeSourceInfo
RepoInfo *sources.RepostoryBuildInfo
commit Commit
Configs map[string]gjson.Result `json:"configs"`
Ctx context.Context
Namespace string `json:"namespace"`
TenantName string `json:"tenant_name"`
GRDataPVCName string `json:"gr_data_pvc_name"`
CachePVCName string `json:"cache_pvc_name"`
CacheMode string `json:"cache_mode"`
CachePath string `json:"cache_path"`
ServiceAlias string `json:"service_alias"`
Action string `json:"action"`
DestImage string `json:"dest_image"`
Logger event.Logger `json:"logger"`
EventID string `json:"event_id"`
CacheDir string `json:"cache_dir"`
TGZDir string `json:"tgz_dir"`
DockerClient *client.Client
containerdClient *containerd.Client
KubeClient kubernetes.Interface
RbdNamespace string
RbdRepoName string
TenantID string
ServiceID string
DeployVersion string
Lang string
Runtime string
BuildEnvs map[string]string
CodeSouceInfo sources.CodeSourceInfo
RepoInfo *sources.RepostoryBuildInfo
commit Commit
Configs map[string]gjson.Result `json:"configs"`
Ctx context.Context
}
//Commit code Commit
@ -303,31 +305,31 @@ func (i *SourceCodeBuildItem) codeBuild() (*build.Response, error) {
return nil, err
}
buildReq := &build.Request{
RbdNamespace: i.RbdNamespace,
SourceDir: i.RepoInfo.GetCodeBuildAbsPath(),
CacheDir: i.CacheDir,
TGZDir: i.TGZDir,
RepositoryURL: i.RepoInfo.RepostoryURL,
CodeSouceInfo: i.CodeSouceInfo,
ServiceAlias: i.ServiceAlias,
ServiceID: i.ServiceID,
TenantID: i.TenantID,
ServerType: i.CodeSouceInfo.ServerType,
Runtime: i.Runtime,
Branch: i.CodeSouceInfo.Branch,
DeployVersion: i.DeployVersion,
Commit: build.Commit{User: i.commit.Author, Message: i.commit.Message, Hash: i.commit.Hash},
Lang: code.Lang(i.Lang),
BuildEnvs: i.BuildEnvs,
Logger: i.Logger,
DockerClient: i.DockerClient,
KubeClient: i.KubeClient,
HostAlias: hostAlias,
Ctx: i.Ctx,
GRDataPVCName: i.GRDataPVCName,
CachePVCName: i.CachePVCName,
CacheMode: i.CacheMode,
CachePath: i.CachePath,
RbdNamespace: i.RbdNamespace,
SourceDir: i.RepoInfo.GetCodeBuildAbsPath(),
CacheDir: i.CacheDir,
TGZDir: i.TGZDir,
RepositoryURL: i.RepoInfo.RepostoryURL,
CodeSouceInfo: i.CodeSouceInfo,
ServiceAlias: i.ServiceAlias,
ServiceID: i.ServiceID,
TenantID: i.TenantID,
ServerType: i.CodeSouceInfo.ServerType,
Runtime: i.Runtime,
Branch: i.CodeSouceInfo.Branch,
DeployVersion: i.DeployVersion,
Commit: build.Commit{User: i.commit.Author, Message: i.commit.Message, Hash: i.commit.Hash},
Lang: code.Lang(i.Lang),
BuildEnvs: i.BuildEnvs,
Logger: i.Logger,
ContainerdClient: i.containerdClient,
KubeClient: i.KubeClient,
HostAlias: hostAlias,
Ctx: i.Ctx,
GRDataPVCName: i.GRDataPVCName,
CachePVCName: i.CachePVCName,
CacheMode: i.CacheMode,
CachePath: i.CachePath,
}
res, err := codeBuild.Build(buildReq)
return res, err

View File

@ -35,14 +35,15 @@ import (
"github.com/containerd/containerd/namespaces"
"github.com/coreos/etcd/clientv3"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
"github.com/goodrain/rainbond/builder/job"
"github.com/goodrain/rainbond/cmd/builder/option"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/mq/api/grpc/pb"
"github.com/goodrain/rainbond/util"
"github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
dbmodel "github.com/goodrain/rainbond/db/model"
mqclient "github.com/goodrain/rainbond/mq/client"
@ -71,10 +72,6 @@ type Manager interface {
//NewManager new manager
func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
dockerClient, err := client.NewEnvClient()
if err != nil {
return nil, err
}
sock := os.Getenv("CONTAINERD_SOCK")
if sock == "" {
sock = "/run/containerd/containerd.sock"
@ -123,7 +120,7 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
}
logrus.Infof("The maximum number of concurrent build tasks supported by the current node is %d", maxConcurrentTask)
return &exectorManager{
DockerClient: dockerClient,
ContainerdClient: containerdClient,
KubeClient: kubeClient,
EtcdCli: etcdCli,
mqClient: mqc,
@ -142,6 +139,7 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
type exectorManager struct {
DockerClient *client.Client
ContainerdClient *containerd.Client
KubeClient kubernetes.Interface
EtcdCli *clientv3.Client
tasks chan *pb.TaskMessage
@ -293,7 +291,7 @@ func (e *exectorManager) exec(task *pb.TaskMessage) error {
//buildFromImage build app from docker image
func (e *exectorManager) buildFromImage(task *pb.TaskMessage) {
i := NewImageBuildItem(task.TaskBody)
i.DockerClient = e.DockerClient
i.ContainerdClient = e.ContainerdClient
i.Logger.Info("Start with the image build application task", map[string]string{"step": "builder-exector", "status": "starting"})
defer event.GetManager().ReleaseLogger(i.Logger)
defer func() {
@ -537,7 +535,7 @@ func (e *exectorManager) slugShare(task *pb.TaskMessage) {
//imageShare share app of docker image
func (e *exectorManager) imageShare(task *pb.TaskMessage) {
i, err := NewImageShareItem(task.TaskBody, e.DockerClient, e.EtcdCli)
i, err := NewImageShareItem(task.TaskBody, e.ContainerdClient, e.EtcdCli)
if err != nil {
logrus.Error("create share image task error.", err.Error())
i.Logger.Error(util.Translation("create share image task error"), map[string]string{"step": "builder-exector", "status": "failure"})

View File

@ -20,6 +20,7 @@ package exector
import (
"fmt"
"github.com/containerd/containerd"
"github.com/goodrain/rainbond/builder"
"io/ioutil"
"os"
@ -55,17 +56,17 @@ var maxBackupVersionSize = 3
//BackupAPPNew backup group app new version
type BackupAPPNew struct {
GroupID string `json:"group_id" `
ServiceIDs []string `json:"service_ids" `
Version string `json:"version"`
EventID string
SourceDir string `json:"source_dir"`
SourceType string `json:"source_type"`
BackupID string `json:"backup_id"`
BackupSize int64
Logger event.Logger
DockerClient *client.Client
GroupID string `json:"group_id" `
ServiceIDs []string `json:"service_ids" `
Version string `json:"version"`
EventID string
SourceDir string `json:"source_dir"`
SourceType string `json:"source_type"`
BackupID string `json:"backup_id"`
BackupSize int64
Logger event.Logger
DockerClient *client.Client
ContainerdClient *containerd.Client
//full-online,full-offline
Mode string `json:"mode"`
S3Config struct {
@ -309,7 +310,7 @@ func (b *BackupAPPNew) backupPluginInfo(appSnapshot *AppSnapshot) error {
for _, pv := range appSnapshot.PluginBuildVersions {
dstDir := fmt.Sprintf("%s/plugin_%s/image_%s.tar", b.SourceDir, pv.PluginID, pv.DeployVersion)
util.CheckAndCreateDir(filepath.Dir(dstDir))
if _, err := sources.ImagePull(b.DockerClient, pv.BuildLocalImage, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 20); err != nil {
if _, err := sources.ImagePull(b.ContainerdClient, pv.BuildLocalImage, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 20); err != nil {
b.Logger.Error(fmt.Sprintf("plugin image: %s; failed to pull image", pv.BuildLocalImage), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf("plugin image: %s; failed to pull image: %v", pv.BuildLocalImage, err)
return err
@ -375,7 +376,7 @@ func (b *BackupAPPNew) saveSlugPkg(app *RegionServiceSnapshot, version *dbmodel.
func (b *BackupAPPNew) saveImagePkg(app *RegionServiceSnapshot, version *dbmodel.VersionInfo) error {
dstDir := fmt.Sprintf("%s/app_%s/image_%s.tar", b.SourceDir, app.ServiceID, version.BuildVersion)
util.CheckAndCreateDir(filepath.Dir(dstDir))
if _, err := sources.ImagePull(b.DockerClient, version.DeliveredPath, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 20); err != nil {
if _, err := sources.ImagePull(b.ContainerdClient, version.DeliveredPath, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 20); err != nil {
b.Logger.Error(util.Translation("error pulling image"), map[string]string{"step": "backup_builder", "status": "failure"})
logrus.Errorf(fmt.Sprintf("image: %s; error pulling image: %v", version.DeliveredPath, err), version.DeliveredPath, err.Error())
}

View File

@ -21,6 +21,7 @@ package exector
import (
"context"
"fmt"
"github.com/containerd/containerd"
"io/ioutil"
"os"
"path"
@ -55,10 +56,11 @@ type BackupAPPRestore struct {
//RestoreMode(cdct) current datacenter and current tenant
//RestoreMode(cdot) current datacenter and other tenant
//RestoreMode(od) other datacenter
RestoreMode string `json:"restore_mode"`
RestoreID string `json:"restore_id"`
DockerClient *client.Client
cacheDir string
RestoreMode string `json:"restore_mode"`
RestoreID string `json:"restore_id"`
DockerClient *client.Client
ContainerdClient *containerd.Client
cacheDir string
//serviceChange key: oldServiceID
serviceChange map[string]*Info
volumeIDMap map[uint]uint
@ -351,7 +353,7 @@ func (b *BackupAPPRestore) restoreVersionAndData(backup *dbmodel.AppBackup, appS
}
imageName := getNewImageName(pb.BuildLocalImage)
if imageName != "" {
if err := sources.ImagePush(b.DockerClient, imageName, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 30); err != nil {
if err := sources.ImagePush(b.ContainerdClient, imageName, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 30); err != nil {
b.Logger.Error("push plugin image failure", map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("failure push image %s: %v", imageName, err)
return err
@ -394,7 +396,7 @@ func (b *BackupAPPRestore) downloadImage(backup *dbmodel.AppBackup, app *RegionS
}
newImageName := getNewImageName(imageName)
if newImageName != imageName {
if err := sources.ImageTag(b.DockerClient, imageName, newImageName, b.Logger, 3); err != nil {
if err := sources.ImageTag(b.ContainerdClient, imageName, newImageName, b.Logger, 3); err != nil {
b.Logger.Error(util.Translation("change image tag error"), map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("change image tag %s to %s failure, %s", imageName, newImageName, err.Error())
return err
@ -402,7 +404,7 @@ func (b *BackupAPPRestore) downloadImage(backup *dbmodel.AppBackup, app *RegionS
imageName = newImageName
}
if imageName != "" {
if err := sources.ImagePush(b.DockerClient, imageName, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 30); err != nil {
if err := sources.ImagePush(b.ContainerdClient, imageName, builder.REGISTRYUSER, builder.REGISTRYPASS, b.Logger, 30); err != nil {
b.Logger.Error(util.Translation("push image to local hub error"), map[string]string{"step": "restore_builder", "status": "failure"})
logrus.Errorf("push image to local hub error when restore backup app, %s", err.Error())
return err

View File

@ -24,14 +24,12 @@ import (
"strings"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/build"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/util"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/event"
"github.com/docker/docker/api/types"
"github.com/pquerna/ffjson/ffjson"
"github.com/goodrain/rainbond/builder/model"
@ -106,31 +104,14 @@ func (e *exectorManager) runD(t *model.BuildPluginTaskBody, logger event.Logger)
mm := strings.Split(t.GitURL, "/")
n1 := strings.Split(mm[len(mm)-1], ".")[0]
buildImageName := fmt.Sprintf(builder.REGISTRYDOMAIN+"/plugin_%s_%s:%s", n1, t.PluginID, t.DeployVersion)
buildOptions := types.ImageBuildOptions{
Tags: []string{buildImageName},
Remove: true,
NetworkMode: build.ImageBuildNetworkModeHost,
AuthConfigs: build.GetTenantRegistryAuthSecrets(e.ctx, t.TenantID, e.KubeClient),
}
if noCache := os.Getenv("NO_CACHE"); noCache != "" {
buildOptions.NoCache = true
} else {
buildOptions.NoCache = false
}
logger.Info("start build image", map[string]string{"step": "builder-exector"})
err := sources.ImageBuild(e.DockerClient, sourceDir, buildOptions, logger, 5)
err := sources.ImageBuild(sourceDir, "rbd-system", t.PluginID, t.DeployVersion, logger, "plug-build")
if err != nil {
logger.Error(fmt.Sprintf("build image %s failure,find log in rbd-chaos", buildImageName), map[string]string{"step": "builder-exector", "status": "failure"})
logrus.Errorf("[plugin]build image error: %s", err.Error())
return err
}
logger.Info("build image success, start to push image to local image registry", map[string]string{"step": "builder-exector"})
err = sources.ImagePush(e.DockerClient, buildImageName, builder.REGISTRYUSER, builder.REGISTRYPASS, logger, 2)
if err != nil {
logger.Error("push image failure, find log in rbd-chaos", map[string]string{"step": "builder-exector"})
logrus.Errorf("push image error: %s", err.Error())
return err
}
logger.Info("push image success", map[string]string{"step": "build-exector"})
version, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByDeployVersion(t.PluginID, t.VersionID, t.DeployVersion)
if err != nil {

View File

@ -69,21 +69,21 @@ func (e *exectorManager) pluginImageBuild(task *pb.TaskMessage) {
func (e *exectorManager) run(t *model.BuildPluginTaskBody, logger event.Logger) error {
hubUser, hubPass := builder.GetImageUserInfoV2(t.ImageURL, t.ImageInfo.HubUser, t.ImageInfo.HubPassword)
if _, err := sources.ImagePull(e.DockerClient, t.ImageURL, hubUser, hubPass, logger, 10); err != nil {
if _, err := sources.ImagePull(e.ContainerdClient, t.ImageURL, hubUser, hubPass, logger, 10); err != nil {
logrus.Errorf("pull image %v error, %v", t.ImageURL, err)
logger.Error("拉取镜像失败", map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
logger.Info("拉取镜像完成", map[string]string{"step": "build-exector", "status": "complete"})
newTag := createPluginImageTag(t.ImageURL, t.PluginID, t.DeployVersion)
err := sources.ImageTag(e.DockerClient, t.ImageURL, newTag, logger, 1)
err := sources.ImageTag(e.ContainerdClient, t.ImageURL, newTag, logger, 1)
if err != nil {
logrus.Errorf("set plugin image tag error, %v", err)
logger.Error("修改镜像tag失败", map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
logger.Info("修改镜像Tag完成", map[string]string{"step": "build-exector", "status": "complete"})
if err := sources.ImagePush(e.DockerClient, newTag, builder.REGISTRYUSER, builder.REGISTRYPASS, logger, 10); err != nil {
if err := sources.ImagePush(e.ContainerdClient, newTag, builder.REGISTRYUSER, builder.REGISTRYPASS, logger, 10); err != nil {
logrus.Errorf("push image %s error, %v", newTag, err)
logger.Error("推送镜像失败", map[string]string{"step": "builder-exector", "status": "failure"})
return err

View File

@ -102,7 +102,7 @@ func (e *exectorManager) serviceCheck(task *pb.TaskMessage) {
var pr parser.Parser
switch input.SourceType {
case "docker-run":
pr = parser.CreateDockerRunOrImageParse(input.Username, input.Password, input.SourceBody, e.DockerClient, logger)
pr = parser.CreateDockerRunOrImageParse(input.Username, input.Password, input.SourceBody, e.ContainerdClient, logger)
case "docker-compose":
var yamlbody = input.SourceBody
if input.SourceBody[0] == '{' {

View File

@ -21,6 +21,7 @@ package exector
import (
"context"
"fmt"
"github.com/containerd/containerd"
"github.com/goodrain/rainbond/builder"
@ -58,12 +59,13 @@ type ImageShareItem struct {
IsTrust bool `json:"is_trust,omitempty"`
} `json:"image_info,omitempty"`
} `json:"share_info"`
DockerClient *client.Client
EtcdCli *clientv3.Client
DockerClient *client.Client
ContainerdClient *containerd.Client
EtcdCli *clientv3.Client
}
// NewImageShareItem creates an image share item
func NewImageShareItem(in []byte, DockerClient *client.Client, EtcdCli *clientv3.Client) (*ImageShareItem, error) {
func NewImageShareItem(in []byte, containerdClient *containerd.Client, EtcdCli *clientv3.Client) (*ImageShareItem, error) {
var isi ImageShareItem
if err := ffjson.Unmarshal(in, &isi); err != nil {
return nil, err
@ -72,7 +74,7 @@ func NewImageShareItem(in []byte, DockerClient *client.Client, EtcdCli *clientv3
isi.LocalImagePassword = builder.REGISTRYPASS
eventID := isi.ShareInfo.EventID
isi.Logger = event.GetManager().GetLogger(eventID)
isi.DockerClient = DockerClient
isi.ContainerdClient = containerdClient
isi.EtcdCli = EtcdCli
return &isi, nil
}
@ -80,22 +82,22 @@ func NewImageShareItem(in []byte, DockerClient *client.Client, EtcdCli *clientv3
//ShareService ShareService
func (i *ImageShareItem) ShareService() error {
hubuser, hubpass := builder.GetImageUserInfoV2(i.LocalImageName, i.LocalImageUsername, i.LocalImagePassword)
_, err := sources.ImagePull(i.DockerClient, i.LocalImageName, hubuser, hubpass, i.Logger, 20)
_, err := sources.ImagePull(i.ContainerdClient, i.LocalImageName, hubuser, hubpass, i.Logger, 20)
if err != nil {
logrus.Errorf("pull image %s error: %s", i.LocalImageName, err.Error())
i.Logger.Error(fmt.Sprintf("拉取应用镜像: %s失败", i.LocalImageName), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
if err := sources.ImageTag(i.DockerClient, i.LocalImageName, i.ImageName, i.Logger, 1); err != nil {
if err := sources.ImageTag(i.ContainerdClient, i.LocalImageName, i.ImageName, i.Logger, 1); err != nil {
logrus.Errorf("change image tag error: %s", err.Error())
i.Logger.Error(fmt.Sprintf("修改镜像tag: %s -> %s 失败", i.LocalImageName, i.ImageName), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
user, pass := builder.GetImageUserInfoV2(i.ImageName, i.ShareInfo.ImageInfo.HubUser, i.ShareInfo.ImageInfo.HubPassword)
if i.ShareInfo.ImageInfo.IsTrust {
err = sources.TrustedImagePush(i.DockerClient, i.ImageName, user, pass, i.Logger, 30)
err = sources.TrustedImagePush(i.ContainerdClient, i.ImageName, user, pass, i.Logger, 30)
} else {
err = sources.ImagePush(i.DockerClient, i.ImageName, user, pass, i.Logger, 30)
err = sources.ImagePush(i.ContainerdClient, i.ImageName, user, pass, i.Logger, 30)
}
if err != nil {
if err.Error() == "authentication required" {

View File

@ -21,6 +21,7 @@ package exector
import (
"context"
"fmt"
"github.com/containerd/containerd"
"time"
"github.com/goodrain/rainbond/builder"
@ -48,8 +49,9 @@ type PluginShareItem struct {
Namespace string `json:"namespace"`
IsTrust bool `json:"is_trust,omitempty"`
} `json:"image_info,omitempty"`
DockerClient *client.Client
EtcdCli *clientv3.Client
DockerClient *client.Client
ContainerdClient *containerd.Client
EtcdCli *clientv3.Client
}
func init() {
@ -74,22 +76,22 @@ func SharePluginItemCreater(in []byte, m *exectorManager) (TaskWorker, error) {
//Run Run
func (i *PluginShareItem) Run(timeout time.Duration) error {
_, err := sources.ImagePull(i.DockerClient, i.LocalImageName, builder.REGISTRYUSER, builder.REGISTRYPASS, i.Logger, 10)
_, err := sources.ImagePull(i.ContainerdClient, i.LocalImageName, builder.REGISTRYUSER, builder.REGISTRYPASS, i.Logger, 10)
if err != nil {
logrus.Errorf("pull image %s error: %s", i.LocalImageName, err.Error())
i.Logger.Error(fmt.Sprintf("拉取应用镜像: %s失败", i.LocalImageName), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
if err := sources.ImageTag(i.DockerClient, i.LocalImageName, i.ImageName, i.Logger, 1); err != nil {
if err := sources.ImageTag(i.ContainerdClient, i.LocalImageName, i.ImageName, i.Logger, 1); err != nil {
logrus.Errorf("change image tag error: %s", err.Error())
i.Logger.Error(fmt.Sprintf("修改镜像tag: %s -> %s 失败", i.LocalImageName, i.ImageName), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
user, pass := builder.GetImageUserInfoV2(i.ImageName, i.ImageInfo.HubUser, i.ImageInfo.HubPassword)
if i.ImageInfo.IsTrust {
err = sources.TrustedImagePush(i.DockerClient, i.ImageName, user, pass, i.Logger, 10)
err = sources.TrustedImagePush(i.ContainerdClient, i.ImageName, user, pass, i.Logger, 10)
} else {
err = sources.ImagePush(i.DockerClient, i.ImageName, user, pass, i.Logger, 10)
err = sources.ImagePush(i.ContainerdClient, i.ImageName, user, pass, i.Logger, 10)
}
if err != nil {
if err.Error() == "authentication required" {

View File

@ -19,14 +19,20 @@
package parser
import (
"encoding/json"
"fmt"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/namespaces"
"github.com/docker/distribution/reference" //"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder/parser/types"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/util"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/net/context"
"runtime"
"strconv"
"strings" //"github.com/docker/docker/client"
@ -34,35 +40,35 @@ import (
// DockerRunOrImageParse parses a docker run command or a plain image name
type DockerRunOrImageParse struct {
user, pass string
ports map[int]*types.Port
volumes map[string]*types.Volume
envs map[string]*types.Env
source string
serviceType string
memory int
image Image
args []string
errors []ParseError
dockerclient *client.Client
logger event.Logger
user, pass string
ports map[int]*types.Port
volumes map[string]*types.Volume
envs map[string]*types.Env
source string
serviceType string
memory int
image Image
args []string
errors []ParseError
containerdClient *containerd.Client
logger event.Logger
}
//CreateDockerRunOrImageParse create parser
func CreateDockerRunOrImageParse(user, pass, source string, dockerclient *client.Client, logger event.Logger) *DockerRunOrImageParse {
func CreateDockerRunOrImageParse(user, pass, source string, containerdClient *containerd.Client, logger event.Logger) *DockerRunOrImageParse {
source = strings.TrimLeft(source, " ")
source = strings.Replace(source, "\n", "", -1)
source = strings.Replace(source, "\\", "", -1)
source = strings.Replace(source, "  ", " ", -1)
return &DockerRunOrImageParse{
user: user,
pass: pass,
source: source,
dockerclient: dockerclient,
ports: make(map[int]*types.Port),
volumes: make(map[string]*types.Volume),
envs: make(map[string]*types.Env),
logger: logger,
user: user,
pass: pass,
source: source,
containerdClient: containerdClient,
ports: make(map[int]*types.Port),
volumes: make(map[string]*types.Volume),
envs: make(map[string]*types.Env),
logger: logger,
}
}
@ -94,7 +100,7 @@ func (d *DockerRunOrImageParse) Parse() ParseErrorList {
d.image = ParseImageName(d.source)
}
// pull the image to verify that it exists
imageInspect, err := sources.ImagePull(d.dockerclient, d.image.Source(), d.user, d.pass, d.logger, 10)
imageInspect, err := sources.ImagePull(d.containerdClient, d.image.Source(), d.user, d.pass, d.logger, 10)
if err != nil {
if strings.Contains(err.Error(), "No such image") {
d.errappend(ErrorAndSolve(FatalError, fmt.Sprintf("镜像(%s)不存在", d.image.String()), SolveAdvice("modify_image", "请确认输入镜像名是否正确")))
@ -107,8 +113,15 @@ func (d *DockerRunOrImageParse) Parse() ParseErrorList {
}
return d.errors
}
if imageInspect != nil && imageInspect.ContainerConfig != nil {
for _, env := range imageInspect.ContainerConfig.Env {
ctx := namespaces.WithNamespace(context.Background(), "rbd-ctr")
image, err := d.containerdClient.GetImage(ctx, d.image.name.String())
if err != nil {
fmt.Println("containerd get image error:", err)
return nil
}
imgConfig, err := getImageConfig(ctx, image)
if err != nil {
fmt.Println("get image config error:", err)
}
if imageInspect != nil && imgConfig != nil {
for _, env := range imgConfig.Env {
envinfo := strings.Split(env, "=")
if len(envinfo) == 2 {
if _, ok := d.envs[envinfo[0]]; !ok {
@ -116,14 +129,22 @@ func (d *DockerRunOrImageParse) Parse() ParseErrorList {
}
}
}
for k := range imageInspect.ContainerConfig.Volumes {
for k := range imgConfig.Volumes {
if _, ok := d.volumes[k]; !ok {
d.volumes[k] = &types.Volume{VolumePath: k, VolumeType: model.ShareFileVolumeType.String()}
}
}
for k := range imageInspect.ContainerConfig.ExposedPorts {
proto := k.Proto()
port := k.Int()
for k := range imgConfig.ExposedPorts {
res := strings.Split(k, "/")
port, err := strconv.Atoi(res[0])
if err != nil {
fmt.Println("parse exposed port error:", err)
return nil
}
// the key may omit the protocol suffix ("8080" instead of "8080/tcp"); default to tcp
proto := "tcp"
if len(res) == 2 {
proto = res[1]
}
if proto != "udp" {
proto = GetPortProtocol(port)
}
@ -289,3 +310,25 @@ func (d *DockerRunOrImageParse) GetServiceInfo() []ServiceInfo {
}
return []ServiceInfo{serviceInfo}
}
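// getImageConfig reads the image's config blob from containerd's content store and decodes it
// as an OCI image config; the OCI and Docker schema2 config media types share the same JSON
// layout, so a single ocispec.Image unmarshal covers both cases handled below.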
func getImageConfig(ctx context.Context, image containerd.Image) (*ocispec.ImageConfig, error) {
desc, err := image.Config(ctx)
if err != nil {
return nil, err
}
switch desc.MediaType {
case ocispec.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
var ocispecImage ocispec.Image
b, err := content.ReadBlob(ctx, image.ContentStore(), desc)
if err != nil {
return nil, err
}
if err := json.Unmarshal(b, &ocispecImage); err != nil {
return nil, err
}
return &ocispecImage.Config, nil
default:
return nil, fmt.Errorf("unknown media type %q", desc.MediaType)
}
}

View File

@ -20,29 +20,36 @@ package sources
import (
"bufio"
"crypto/tls"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/containerd/containerd"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/config"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/eapache/channels"
"github.com/goodrain/rainbond/builder"
jobc "github.com/goodrain/rainbond/builder/job"
"github.com/goodrain/rainbond/builder/model"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/util"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"io"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"os"
"path"
"path/filepath"
"strings"
"time"
dtypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/goodrain/rainbond/builder"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/goodrain/rainbond/builder/model"
"github.com/goodrain/rainbond/event"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
//ErrorNoAuth error no auth
@ -53,22 +60,8 @@ var ErrorNoImage = fmt.Errorf("image not exist")
//ImagePull pull docker image
//timeout minutes of the unit
func ImagePull(dockerCli *client.Client, image string, username, password string, logger event.Logger, timeout int) (*types.ImageInspect, error) {
func ImagePull(containerdClient *containerd.Client, image string, username, password string, logger event.Logger, timeout int) (*images.Image, error) {
printLog(logger, "info", fmt.Sprintf("start get image:%s", image), map[string]string{"step": "pullimage"})
var pullipo types.ImagePullOptions
if username != "" && password != "" {
auth, err := EncodeAuthToBase64(types.AuthConfig{Username: username, Password: password})
if err != nil {
logrus.Errorf("make auth base63 push image error: %s", err.Error())
printLog(logger, "error", fmt.Sprintf("Failed to generate a Token to get the image"), map[string]string{"step": "builder-exector", "status": "failure"})
return nil, err
}
pullipo = types.ImagePullOptions{
RegistryAuth: auth,
}
} else {
pullipo = types.ImagePullOptions{}
}
rf, err := reference.ParseAnyReference(image)
if err != nil {
logrus.Errorf("reference image error: %s", err.Error())
@ -78,10 +71,28 @@ func ImagePull(dockerCli *client.Client, image string, username, password string
if timeout < 1 {
timeout = 1
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*time.Duration(timeout))
defer cancel()
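// All builder image operations run in a dedicated "rbd-ctr" containerd namespace, keeping
// them separate from images managed through other namespaces (for example the kubelet's
// "k8s.io" namespace).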
ctx := namespaces.WithNamespace(context.Background(), "rbd-ctr")
defaultTLS := &tls.Config{
InsecureSkipVerify: true,
}
hostOpt := config.HostOptions{}
hostOpt.DefaultTLS = defaultTLS
if username != "" && password != "" {
hostOpt.Credentials = func(host string) (string, string, error) {
return username, password, nil
}
}
options := docker.ResolverOptions{
Tracker: docker.NewInMemoryTracker(),
Hosts: config.ConfigureHosts(ctx, hostOpt),
}
pullOpts := []containerd.RemoteOpt{
containerd.WithPullUnpack,
containerd.WithResolver(docker.NewResolver(options)),
}
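// The resolver supplies registry credentials through the HostOptions callback and accepts
// self-signed certificates (InsecureSkipVerify); WithPullUnpack additionally unpacks the
// pulled layers into the snapshotter so the image is ready to use.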
// TODO: work around the 1.12 API issue "repository name must be canonical" by using rf.String(), the fully qualified image reference
readcloser, err := dockerCli.ImagePull(ctx, rf.String(), pullipo)
_, err = containerdClient.Pull(ctx, rf.String(), pullOpts...)
if err != nil {
logrus.Debugf("image name: %s readcloser error: %v", image, err.Error())
if strings.HasSuffix(err.Error(), "does not exist or no pull access") {
@ -90,51 +101,45 @@ func ImagePull(dockerCli *client.Client, image string, username, password string
}
return nil, err
}
defer readcloser.Close()
dec := json.NewDecoder(readcloser)
for {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
var jm JSONMessage
if err := dec.Decode(&jm); err != nil {
if err == io.EOF {
break
}
logrus.Debugf("error decoding jm(JSONMessage): %v", err)
return nil, err
}
if jm.Error != nil {
logrus.Debugf("error pulling image: %v", jm.Error)
return nil, jm.Error
}
printLog(logger, "debug", fmt.Sprintf(jm.JSONString()), map[string]string{"step": "progress"})
logrus.Debug(jm.JSONString())
}
printLog(logger, "debug", "Get the image information and its raw representation", map[string]string{"step": "progress"})
ins, _, err := dockerCli.ImageInspectWithRaw(ctx, image)
imageService := containerdClient.ImageService()
imageObj, err := imageService.Get(ctx, rf.String())
if err != nil {
printLog(logger, "debug", "Fail to get the image information and its raw representation", map[string]string{"step": "progress"})
return nil, err
return nil, fmt.Errorf("image(%v) pull error", image)
}
logrus.Infof("pull image taget:", imageObj.Target)
//defer readcloser.Close()
printLog(logger, "info", fmt.Sprintf("Success Pull Image%s", image), map[string]string{"step": "pullimage"})
return &ins, nil
return &imageObj, nil
}
//ImageTag change docker image tag
func ImageTag(dockerCli *client.Client, source, target string, logger event.Logger, timeout int) error {
func ImageTag(containerdClient *containerd.Client, source, target string, logger event.Logger, timeout int) error {
logrus.Debugf(fmt.Sprintf("change image tag%s -> %s", source, target))
printLog(logger, "info", fmt.Sprintf("change image tag%s -> %s", source, target), map[string]string{"step": "changetag"})
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*time.Duration(timeout))
defer cancel()
err := dockerCli.ImageTag(ctx, source, target)
ctx := namespaces.WithNamespace(context.Background(), "rbd-ctr")
imageService := containerdClient.ImageService()
image, err := imageService.Get(ctx, source)
if err != nil {
logrus.Debugf("image tag err: %s", err.Error())
logrus.Errorf("imagetag imageService Get error: %s", err.Error())
return err
}
logrus.Debugf("change image tag success")
image.Name = target
if _, err = imageService.Create(ctx, image); err != nil {
if errdefs.IsAlreadyExists(err) {
if err = imageService.Delete(ctx, image.Name); err != nil {
logrus.Errorf("imagetag imageService Delete error: %s", err.Error())
return err
}
if _, err = imageService.Create(ctx, image); err != nil {
logrus.Errorf("imageService Create error: %s", err.Error())
return err
}
} else {
logrus.Errorf("imagetag imageService Create error: %s", err.Error())
return err
}
}
logrus.Info("change image tag success")
printLog(logger, "info", "change image tag success", map[string]string{"step": "changetag"})
return nil
}
@ -208,7 +213,7 @@ func GenSaveImageName(name string) string {
//ImagePush push image to registry
//timeout minutes of the unit
func ImagePush(dockerCli *client.Client, image, user, pass string, logger event.Logger, timeout int) error {
func ImagePush(containerdClient *containerd.Client, image, user, pass string, logger event.Logger, timeout int) error {
printLog(logger, "info", fmt.Sprintf("start push image%s", image), map[string]string{"step": "pushimage"})
if timeout < 1 {
timeout = 1
@ -223,62 +228,45 @@ func ImagePush(dockerCli *client.Client, image, user, pass string, logger event.
if err != nil {
return err
}
var opts types.ImagePushOptions
pushauth, err := EncodeAuthToBase64(types.AuthConfig{
Username: user,
Password: pass,
ServerAddress: reference.Domain(ref),
})
ctx := namespaces.WithNamespace(context.Background(), "rbd-ctr")
getImage, err := containerdClient.GetImage(ctx, ref.String())
if err != nil {
logrus.Errorf("make auth base63 push image error: %s", err.Error())
if logger != nil {
logger.Error(fmt.Sprintf("Failed to generate a token to get the image"), map[string]string{"step": "builder-exector", "status": "failure"})
}
logrus.Errorf("containerdClient get Image error: %s", err.Error())
return err
}
opts.RegistryAuth = pushauth
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*time.Duration(timeout))
defer cancel()
readcloser, err := dockerCli.ImagePush(ctx, image, opts)
defaultTLS := &tls.Config{
InsecureSkipVerify: true,
}
hostOpt := config.HostOptions{}
hostOpt.DefaultTLS = defaultTLS
hostOpt.Credentials = func(host string) (string, string, error) {
return user, pass, nil
}
options := docker.ResolverOptions{
Tracker: docker.NewInMemoryTracker(),
Hosts: config.ConfigureHosts(ctx, hostOpt),
}
pushOpts := []containerd.RemoteOpt{
containerd.WithResolver(docker.NewResolver(options)),
}
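// Push uploads the manifest referenced by getImage.Target() and every blob it references,
// resolving the destination registry with the same credential callback used on the pull path.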
logrus.Info("getImage.Target", getImage.Target())
err = containerdClient.Push(ctx, image, getImage.Target(), pushOpts...)
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
printLog(logger, "error", fmt.Sprintf("image %s does not exist, cannot be pushed", image), map[string]string{"step": "pushimage", "status": "failure"})
return fmt.Errorf("Image(%s) does not exist", image)
}
logrus.Errorf("containerdClient Push Image error: %s", err.Error())
return err
}
if readcloser != nil {
defer readcloser.Close()
dec := json.NewDecoder(readcloser)
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
var jm JSONMessage
if err := dec.Decode(&jm); err != nil {
if err == io.EOF {
break
}
return err
}
if jm.Error != nil {
return jm.Error
}
printLog(logger, "debug", jm.JSONString(), map[string]string{"step": "progress"})
}
}
printLog(logger, "info", fmt.Sprintf("success push image %s", image), map[string]string{"step": "pushimage"})
return nil
}
//TrustedImagePush push image to trusted registry
func TrustedImagePush(dockerCli *client.Client, image, user, pass string, logger event.Logger, timeout int) error {
func TrustedImagePush(containerdClient *containerd.Client, image, user, pass string, logger event.Logger, timeout int) error {
if err := CheckTrustedRepositories(image, user, pass); err != nil {
return err
}
return ImagePush(dockerCli, image, user, pass, logger, timeout)
return ImagePush(containerdClient, image, user, pass, logger, timeout)
}
//CheckTrustedRepositories check Repositories is exist ,if not create it.
@ -339,40 +327,63 @@ func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
return base64.URLEncoding.EncodeToString(buf), nil
}
//ImageBuild ImageBuild
func ImageBuild(dockerCli *client.Client, contextDir string, options types.ImageBuildOptions, logger event.Logger, timeout int) error {
var ctx context.Context
if timeout != 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(context.Background(), time.Minute*time.Duration(timeout))
defer cancel()
} else {
ctx = context.Background()
}
buildCtx, err := archive.TarWithOptions(contextDir, &archive.TarOptions{
Compression: archive.Uncompressed,
ExcludePatterns: []string{""},
IncludeFiles: []string{"."},
})
//ImageBuild use kaniko build image
func ImageBuild(contextDir, RbdNamespace, ServiceID, DeployVersion string, logger event.Logger, buildType string) error {
// create image name
buildImageName := CreateImageName(ServiceID, DeployVersion)
// The same component keeps only one build task running at a time
jobList, err := jobc.GetJobController().GetServiceJobs(ServiceID)
if err != nil {
return err
logrus.Errorf("get pre build job for service %s failure ,%s", ServiceID, err.Error())
}
rc, err := dockerCli.ImageBuild(ctx, buildCtx, options)
if err != nil {
return err
if len(jobList) > 0 {
for _, job := range jobList {
jobc.GetJobController().DeleteJob(job.Name)
}
}
defer rc.Body.Close()
name := fmt.Sprintf("%s-%s", ServiceID, DeployVersion)
namespace := RbdNamespace
job := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
"service": ServiceID,
"job": "codebuild",
},
},
}
podSpec := corev1.PodSpec{RestartPolicy: corev1.RestartPolicyOnFailure} // only support never and onfailure
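// CreateVolumesAndMounts presumably chooses the host paths and secrets for the kaniko pod
// based on buildType ("run-build" for runtime images, "plug-build" for plugins); the build
// context itself ends up mounted at /workspace, matching the --context argument below.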
volumes, volumeMounts := CreateVolumesAndMounts(contextDir, buildType)
podSpec.Volumes = volumes
// container config
container := corev1.Container{
Name: name,
Image: "yangk/executor:latest",
Stdin: true,
StdinOnce: true,
Args: []string{"--context=dir:///workspace", fmt.Sprintf("--destination=%s", buildImageName), "--skip-tls-verify"},
}
container.VolumeMounts = volumeMounts
podSpec.Containers = append(podSpec.Containers, container)
job.Spec = podSpec
writer := logger.GetWriter("builder", "info")
reChan := channels.NewRingChannel(10)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var out io.Writer
if logger != nil {
out = logger.GetWriter("build-progress", "info")
} else {
out, _ = os.OpenFile("/tmp/build.log", os.O_RDWR|os.O_CREATE, 0755)
}
err = jsonmessage.DisplayJSONMessagesStream(rc.Body, out, 0, true, nil)
logrus.Debugf("create job[name: %s; namespace: %s]", job.Name, job.Namespace)
err = jobc.GetJobController().ExecJob(ctx, &job, writer, reChan)
if err != nil {
out.Write([]byte("\n"))
logrus.Errorf("read build log failure %s", err.Error())
logrus.Errorf("create new job:%s failed: %s", name, err.Error())
return err
}
logger.Info(util.Translation("create build code job success"), map[string]string{"step": "build-exector"})
// delete job after complete
defer jobc.GetJobController().DeleteJob(job.Name)
err = WaitingComplete(reChan)
if err != nil {
logrus.Errorf("waiting complete failed: %s", err.Error())
return err
}
return nil
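For illustration, a caller of the kaniko-based ImageBuild could look like the sketch below; every argument value is a placeholder made up for this example, and logger is assumed to be an existing event.Logger.

// Hypothetical call site; paths, namespace and IDs are placeholders.
if err := sources.ImageBuild(
	"/grdata/build/tenant/service/source", // contextDir that contains the Dockerfile
	"rbd-system",                          // RbdNamespace where the kaniko pod runs
	"c1a29fe4",                            // ServiceID
	"20220919123321",                      // DeployVersion
	logger,                                // event.Logger receiving build progress
	"run-build",                           // buildType selects the /workspace volume
); err != nil {
	logrus.Errorf("kaniko build failed: %v", err)
}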
@ -527,15 +538,18 @@ func CopyToFile(outfile string, r io.Reader) error {
}
//ImageRemove remove image
func ImageRemove(dockerCli *client.Client, image string) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
_, err := dockerCli.ImageRemove(ctx, image, types.ImageRemoveOptions{Force: true})
func ImageRemove(containerdClient *containerd.Client, image string) error {
ctx := namespaces.WithNamespace(context.Background(), "rbd-ctr")
imageStore := containerdClient.ImageService()
err := imageStore.Delete(ctx, image)
if err != nil {
logrus.Errorf("image remove ")
}
return err
}
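ImageService().Delete as used here only removes the image record; the blobs are reclaimed later by containerd's garbage collector. If synchronous reclamation were needed, a variant along the lines of this sketch could be used (it assumes the SynchronousDelete option from github.com/containerd/containerd/images and is not part of this change).

package main

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/namespaces"
)

// imageRemoveSync deletes the image and waits for its content to be
// garbage collected before returning.
func imageRemoveSync(client *containerd.Client, image string) error {
	ctx := namespaces.WithNamespace(context.Background(), "rbd-ctr")
	return client.ImageService().Delete(ctx, image, images.SynchronousDelete())
}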
// CheckIfImageExists checks whether the image already exists in the local containerd image store
func CheckIfImageExists(dockerCli *client.Client, image string) (imageName string, isExists bool, err error) {
func CheckIfImageExists(containerdClient *containerd.Client, image string) (imageName string, isExists bool, err error) {
repo, err := reference.Parse(image)
if err != nil {
return "", false, fmt.Errorf("parse image %s: %v", image, err)
@ -547,17 +561,13 @@ func CheckIfImageExists(dockerCli *client.Client, image string) (imageName strin
}
imageFullName := named.Name() + ":" + tag
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
imageSummarys, err := dockerCli.ImageList(ctx, dtypes.ImageListOptions{
Filters: filters.NewArgs(filters.KeyValuePair{Key: "reference", Value: imageFullName}),
})
ctx := namespaces.WithNamespace(context.Background(), "rbd-ctr")
imageSummarys, err := containerdClient.ListImages(ctx)
if err != nil {
return "", false, fmt.Errorf("list images: %v", err)
}
for _, imageSummary := range imageSummarys {
fmt.Printf("%#v", imageSummary.RepoTags)
fmt.Printf("%#v", imageSummary.Name())
}
_ = imageSummarys
@ -567,12 +577,17 @@ func CheckIfImageExists(dockerCli *client.Client, image string) (imageName strin
// ImagesPullAndPush handles images of non-local components, for example: builder, runner, /rbd-mesh-data-panel
func ImagesPullAndPush(sourceImage, targetImage, username, password string, logger event.Logger) error {
dockerClient, err := client.NewClientWithOpts(client.FromEnv)
sock := os.Getenv("CONTAINERD_SOCK")
if sock == "" {
sock = "/run/containerd/containerd.sock"
}
containerdClient, err := containerd.New(sock)
if err != nil {
logrus.Errorf("create docker client failed: %s", err.Error())
return err
}
sourceImage, exists, err := CheckIfImageExists(dockerClient, sourceImage)
sourceImage, exists, err := CheckIfImageExists(containerdClient, sourceImage)
if err != nil {
logrus.Errorf("failed to check whether the builder mirror exists: %s", err.Error())
return err
@ -580,15 +595,15 @@ func ImagesPullAndPush(sourceImage, targetImage, username, password string, logg
logrus.Debugf("source image %v, targetImage %v, exists %v", sourceImage, exists)
if !exists {
hubUser, hubPass := builder.GetImageUserInfoV2(sourceImage, username, password)
if _, err := ImagePull(dockerClient, targetImage, hubUser, hubPass, logger, 15); err != nil {
if _, err := ImagePull(containerdClient, targetImage, hubUser, hubPass, logger, 15); err != nil {
printLog(logger, "error", fmt.Sprintf("pull image %s failed %v", targetImage, err), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
if err := ImageTag(dockerClient, targetImage, sourceImage, logger, 15); err != nil {
if err := ImageTag(containerdClient, targetImage, sourceImage, logger, 15); err != nil {
printLog(logger, "error", fmt.Sprintf("change image tag %s to %s failed", targetImage, sourceImage), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
if err := ImagePush(dockerClient, sourceImage, hubUser, hubPass, logger, 15); err != nil {
if err := ImagePush(containerdClient, sourceImage, hubUser, hubPass, logger, 15); err != nil {
printLog(logger, "error", fmt.Sprintf("push image %s failed %v", sourceImage, err), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
@ -618,3 +633,144 @@ func printLog(logger event.Logger, level, msg string, info map[string]string) {
}
}
}
// CreateImageName builds the image name used by the kaniko build job:
// <registry>/<tenantNamespace>-<app>-<component>:<version>, falling back to
// <registry>/<serviceID>:<version> when any metadata lookup fails.
func CreateImageName(ServiceID, DeployVersion string) string {
imageName := strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, ServiceID, DeployVersion))
logrus.Info("imageName:", imageName)
component, err := db.GetManager().TenantServiceDao().GetServiceByID(ServiceID)
if err != nil {
logrus.Errorf("image build get service by id error: %v", err)
return imageName
}
app, err := db.GetManager().ApplicationDao().GetByServiceID(ServiceID)
if err != nil {
logrus.Errorf("image build get app by serviceid error: %v", err)
return imageName
}
tenant, err := db.GetManager().TenantDao().GetTenantByUUID(component.TenantID)
if err != nil {
logrus.Errorf("image build get tenant by uuid error: %v", err)
return imageName
}
workloadName := fmt.Sprintf("%s-%s-%s", tenant.Namespace, app.K8sApp, component.K8sComponentName)
return strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, workloadName, DeployVersion))
}
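As a worked example with made-up values, assuming builder.REGISTRYDOMAIN is goodrain.me:

// Illustrative only; the IDs and metadata below are not taken from this change.
name := sources.CreateImageName("svc-0a1b2c", "20220919123321")
// with tenant namespace "team-a", app K8sApp "console" and component
// K8sComponentName "api" found in the database:
//   name == "goodrain.me/team-a-console-api:20220919123321"
// if any of the lookups fails, the fallback is:
//   name == "goodrain.me/svc-0a1b2c:20220919123321"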
// CreateVolumesAndMounts builds the volumes and volume mounts for the kaniko build pod according to the build type.
func CreateVolumesAndMounts(contextDir, buildType string) (volumes []corev1.Volume, volumeMounts []corev1.VolumeMount) {
pathSplit := strings.Split(contextDir, "/")
subPath := strings.Join(pathSplit[2:], "/")
hostPathType := corev1.HostPathDirectoryOrCreate
hostsFilePathType := corev1.HostPathFile
// volumes and volume mounts shared by every kaniko build pod
volumes = []corev1.Volume{
{
Name: "kaniko-secret",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "rbd-hub-credentials",
Items: []corev1.KeyToPath{
{
Key: ".dockerconfigjson",
Path: "config.json",
},
},
},
},
},
{
Name: "hosts",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc/hosts",
Type: &hostsFilePathType,
},
},
},
}
volumeMounts = []corev1.VolumeMount{
{
Name: "kaniko-secret",
MountPath: "/kaniko/.docker",
},
{
Name: "hosts",
MountPath: "/etc/hosts",
},
}
// add build-type specific volumes and volume mounts
if buildType == "plug-build" {
volume := corev1.Volume{
Name: "plug-build",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: contextDir,
Type: &hostPathType,
},
},
}
volumes = append(volumes, volume)
volumeMount := corev1.VolumeMount{
Name: "plug-build",
MountPath: "/workspace",
}
volumeMounts = append(volumeMounts, volumeMount)
}
if buildType == "run-build" {
volume := corev1.Volume{
Name: "run-build",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "rbd-cpt-grdata",
},
},
}
volumes = append(volumes, volume)
volumeMount := corev1.VolumeMount{
Name: "run-build",
MountPath: "/workspace",
SubPath: subPath,
}
volumeMounts = append(volumeMounts, volumeMount)
}
return volumes, volumeMounts
}
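To make the mount layout concrete, here is a sketch for a source build; the contextDir path is a placeholder, and the subPath is derived by dropping the first two path segments exactly as the code above does.

// Hypothetical usage; the path is illustrative.
volumes, mounts := sources.CreateVolumesAndMounts("/grdata/build/tenant/service/source", "run-build")
// volumes: kaniko-secret (rbd-hub-credentials), hosts (/etc/hosts),
//          run-build (PVC rbd-cpt-grdata)
// mounts:  /kaniko/.docker, /etc/hosts,
//          /workspace with SubPath "build/tenant/service/source"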
// WaitingComplete blocks until the build job reports both job completion and
// log completion on the ring channel, or until the 60 minute timeout fires.
func WaitingComplete(reChan *channels.RingChannel) (err error) {
var logComplete = false
var jobComplete = false
timeOut := time.NewTimer(time.Minute * 60)
for {
select {
case <-timeOut.C:
return fmt.Errorf("build time out (more than 60 minute)")
case jobStatus := <-reChan.Out():
status := jobStatus.(string)
switch status {
case "complete":
jobComplete = true
if logComplete {
return nil
}
logrus.Info(util.Translation("build code job exec completed"), map[string]string{"step": "build-exector"})
case "failed":
jobComplete = true
err = fmt.Errorf("build code job exec failure")
if logComplete {
return err
}
logrus.Info(util.Translation("build code job exec failed"), map[string]string{"step": "build-exector"})
case "cancel":
jobComplete = true
err = fmt.Errorf("build code job is canceled")
if logComplete {
return err
}
case "logcomplete":
logComplete = true
if jobComplete {
return err
}
}
}
}
}
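The ring channel carries two independent signals: a job status ("complete", "failed" or "cancel") and "logcomplete" once log streaming ends, and WaitingComplete only returns after it has seen both. Below is a minimal sketch of that protocol with the producer side stubbed in; in the real flow the job controller emits these values through ExecJob.

// Sketch of the status protocol; the goroutine stands in for the job controller.
reChan := channels.NewRingChannel(10)
go func() {
	reChan.In() <- "logcomplete" // log streaming finished
	reChan.In() <- "complete"    // the kaniko pod exited successfully
}()
if err := sources.WaitingComplete(reChan); err != nil {
	logrus.Errorf("build did not complete: %v", err)
}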

go.mod

@ -1,12 +1,10 @@
module github.com/goodrain/rainbond
go 1.15
go 1.17
require (
cuelang.org/go v0.2.2
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4 // indirect
github.com/aliyun/aliyun-oss-go-sdk v2.1.5+incompatible
github.com/atcdot/gorm-bulk-upsert v1.0.0
github.com/aws/aws-sdk-go v1.36.15
@ -48,10 +46,8 @@ require (
github.com/golang/protobuf v1.5.2
github.com/goodrain/rainbond-oam v0.0.0-20220908064513-fed01f0e1a1b
github.com/goodrain/rainbond-operator v1.3.1-0.20210401055914-f8fe4bf89a21
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/websocket v1.4.2
github.com/gosuri/uitable v0.0.4
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/howeyc/fsnotify v0.9.0
github.com/imdario/mergo v0.3.12
github.com/jinzhu/gorm v1.9.16
@ -68,6 +64,7 @@ require (
github.com/onsi/ginkgo v1.14.1
github.com/onsi/gomega v1.10.3
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2
github.com/pborman/uuid v1.2.1
github.com/pebbe/zmq4 v1.2.1
github.com/pkg/errors v0.9.1
@ -96,7 +93,6 @@ require (
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/grpc v1.40.0
google.golang.org/protobuf v1.27.1
gopkg.in/alecthomas/kingpin.v2 v2.2.6
@ -117,6 +113,168 @@ require (
sigs.k8s.io/yaml v1.2.0
)
require (
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
github.com/Masterminds/squirrel v1.5.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/beevik/ntp v0.3.0 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 // indirect
github.com/cockroachdb/apd/v2 v2.0.1 // indirect
github.com/containerd/cgroups v1.0.1 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/containerd/ttrpc v1.1.0 // indirect
github.com/containerd/typeurl v1.0.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deislabs/oras v0.10.0 // indirect
github.com/docker/docker-credential-helpers v0.6.3 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 // indirect
github.com/eapache/queue v1.1.0 // indirect
github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect
github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043 // indirect
github.com/emirpasic/gods v1.12.0 // indirect
github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect
github.com/evanphx/json-patch v4.9.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/go-logfmt/logfmt v0.5.0 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/go-logr/zapr v0.4.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/spec v0.19.8 // indirect
github.com/go-openapi/swag v0.19.11 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.2.0 // indirect
github.com/googleapis/gnostic v0.4.1 // indirect
github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hodgesds/perf-utils v0.0.8 // indirect
github.com/huandu/xstrings v1.3.1 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmoiron/sqlx v1.2.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect
github.com/klauspost/compress v1.11.13 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/leodido/go-urn v1.2.0 // indirect
github.com/lib/pq v1.9.0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/lufia/iostat v1.1.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/mattn/go-xmlrpc v0.0.3 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mdlayher/genetlink v1.0.0 // indirect
github.com/mdlayher/netlink v1.1.0 // indirect
github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee // indirect
github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/reflectwalk v1.0.0 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mozillazg/go-pinyin v0.18.0 // indirect
github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect
github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 // indirect
github.com/nxadm/tail v1.4.4 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
github.com/opencontainers/selinux v1.10.1 // indirect
github.com/pelletier/go-toml v1.8.1 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 // indirect
github.com/russross/blackfriday v1.5.2 // indirect
github.com/russross/blackfriday/v2 v2.0.1 // indirect
github.com/sergi/go-diff v1.0.0 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/siebenmann/go-kstat v0.0.0-20200303194639-4e8294f9e9d5 // indirect
github.com/smartystreets/assertions v1.0.1 // indirect
github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/cobra v1.1.1 // indirect
github.com/src-d/gcfg v1.4.0 // indirect
github.com/tidwall/match v1.0.3 // indirect
github.com/tidwall/pretty v1.0.2 // indirect
github.com/xanzy/ssh-agent v0.2.1 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.opencensus.io v0.22.4 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.16.0 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.1 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.0.1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
gopkg.in/gorp.v1 v1.7.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20201113003025-83324d819ded // indirect
k8s.io/klog/v2 v2.5.0 // indirect
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd // indirect
k8s.io/kubectl v0.20.4 // indirect
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
sigs.k8s.io/kustomize v2.0.3+incompatible // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 // indirect
)
// Pinned to kubernetes-1.20.0
replace (
github.com/atcdot/gorm-bulk-upsert => github.com/goodrain/gorm-bulk-upsert v1.0.1-0.20210608013724-7e7870d16357

go.sum

@ -484,8 +484,6 @@ github.com/docker/cli v20.10.3+incompatible h1:WVEgoV/GpsTK5hruhHdYi79blQ+nmcm+7
github.com/docker/cli v20.10.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d h1:jC8tT/S0OGx2cswpeUTn4gOIea8P08lD3VFQT0cOZ50=
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible h1:SiUATuP//KecDjpOK2tvZJgeScYAklvyjfK8JZlU6fo=
github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.2+incompatible h1:vFgEHPqWBTp4pTjdLwjAA4bSo3gvIGOYwuJTlEjVBCw=
github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
@ -1279,7 +1277,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
@ -1787,7 +1784,6 @@ go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHt
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=


@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 mozillazg
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,40 +0,0 @@
/*
Package pinyin : 汉语拼音转换工具.
Usage
package main
import (
"fmt"
"github.com/mozillazg/go-pinyin"
)
func main() {
hans := "中国人"
// 默认
a := pinyin.NewArgs()
fmt.Println(pinyin.Pinyin(hans, a))
// [[zhong] [guo] [ren]]
// 包含声调
a.Style = pinyin.Tone
fmt.Println(pinyin.Pinyin(hans, a))
// [[zhōng] [guó] [rén]]
// 声调用数字表示
a.Style = pinyin.Tone2
fmt.Println(pinyin.Pinyin(hans, a))
// [[zho1ng] [guo2] [re2n]]
// 开启多音字模式
a = pinyin.NewArgs()
a.Heteronym = true
fmt.Println(pinyin.Pinyin(hans, a))
// [[zhong zhong] [guo] [ren]]
a.Style = pinyin.Tone2
fmt.Println(pinyin.Pinyin(hans, a))
// [[zho1ng zho4ng] [guo2] [re2n]]
}
*/
package pinyin


@ -1,33 +0,0 @@
package pinyin
// 带音标字符。
var phoneticSymbol = map[string]string{
"ā": "a1",
"á": "a2",
"ǎ": "a3",
"à": "a4",
"ē": "e1",
"é": "e2",
"ě": "e3",
"è": "e4",
"ō": "o1",
"ó": "o2",
"ǒ": "o3",
"ò": "o4",
"ī": "i1",
"í": "i2",
"ǐ": "i3",
"ì": "i4",
"ū": "u1",
"ú": "u2",
"ǔ": "u3",
"ù": "u4",
"ü": "v",
"ǘ": "v2",
"ǚ": "v3",
"ǜ": "v4",
"ń": "n2",
"ň": "n3",
"ǹ": "n4",
"ḿ": "m2",
}


@ -1,275 +0,0 @@
package pinyin
import (
"regexp"
"strings"
)
// Meta
const (
Version = "0.18.0"
Author = "mozillazg, 闲耘"
License = "MIT"
Copyright = "Copyright (c) 2016 mozillazg, 闲耘"
)
// 拼音风格(推荐)
const (
Normal = 0 // 普通风格,不带声调(默认风格)。如: zhong guo
Tone = 1 // 声调风格1拼音声调在韵母第一个字母上。如 zhōng guó
Tone2 = 2 // 声调风格2即拼音声调在各个韵母之后用数字 [1-4] 进行表示。如: zho1ng guo2
Tone3 = 8 // 声调风格3即拼音声调在各个拼音之后用数字 [1-4] 进行表示。如: zhong1 guo2
Initials = 3 // 声母风格,只返回各个拼音的声母部分。如: zh g 。注意:不是所有的拼音都有声母
FirstLetter = 4 // 首字母风格,只返回拼音的首字母部分。如: z g
Finals = 5 // 韵母风格,只返回各个拼音的韵母部分,不带声调。如: ong uo
FinalsTone = 6 // 韵母风格1带声调声调在韵母第一个字母上。如 ōng uó
FinalsTone2 = 7 // 韵母风格2带声调声调在各个韵母之后用数字 [1-4] 进行表示。如: o1ng uo2
FinalsTone3 = 9 // 韵母风格3带声调声调在各个拼音之后用数字 [1-4] 进行表示。如: ong1 uo2
)
// 拼音风格(兼容之前的版本)
const (
NORMAL = Normal
TONE = Tone
TONE2 = Tone2
INITIALS = Initials
FIRST_LETTER = FirstLetter
FINALS = Finals
FINALS_TONE = FinalsTone
FINALS_TONE2 = FinalsTone2
)
// 声母表
var initialArray = strings.Split(
"b,p,m,f,d,t,n,l,g,k,h,j,q,x,r,zh,ch,sh,z,c,s",
",",
)
// 所有带声调的字符
var rePhoneticSymbolSource = func(m map[string]string) string {
s := ""
for k := range m {
s = s + k
}
return s
}(phoneticSymbol)
// 匹配带声调字符的正则表达式
var rePhoneticSymbol = regexp.MustCompile("[" + rePhoneticSymbolSource + "]")
// 匹配使用数字标识声调的字符的正则表达式
var reTone2 = regexp.MustCompile("([aeoiuvnm])([1-4])$")
// 匹配 Tone2 中标识韵母声调的正则表达式
var reTone3 = regexp.MustCompile("^([a-z]+)([1-4])([a-z]*)$")
// Args 配置信息
type Args struct {
Style int // 拼音风格(默认: Normal)
Heteronym bool // 是否启用多音字模式(默认:禁用)
Separator string // Slug 中使用的分隔符(默认:-)
// 处理没有拼音的字符(默认忽略没有拼音的字符)
// 函数返回的 slice 的长度为0 则表示忽略这个字符
Fallback func(r rune, a Args) []string
}
// Style 默认配置:风格
var Style = Normal
// Heteronym 默认配置:是否启用多音字模式
var Heteronym = false
// Separator 默认配置: `Slug` 中 Join 所用的分隔符
var Separator = "-"
// Fallback 默认配置: 如何处理没有拼音的字符(忽略这个字符)
var Fallback = func(r rune, a Args) []string {
return []string{}
}
var finalExceptionsMap = map[string]string{
"ū": "ǖ",
"ú": "ǘ",
"ǔ": "ǚ",
"ù": "ǜ",
}
var reFinalExceptions = regexp.MustCompile("^(j|q|x)(ū|ú|ǔ|ù)$")
var reFinal2Exceptions = regexp.MustCompile("^(j|q|x)u(\\d?)$")
// NewArgs 返回包含默认配置的 `Args`
func NewArgs() Args {
return Args{Style, Heteronym, Separator, Fallback}
}
// 获取单个拼音中的声母
func initial(p string) string {
s := ""
for _, v := range initialArray {
if strings.HasPrefix(p, v) {
s = v
break
}
}
return s
}
// 获取单个拼音中的韵母
func final(p string) string {
n := initial(p)
if n == "" {
return handleYW(p)
}
// 特例 j/q/x
matches := reFinalExceptions.FindStringSubmatch(p)
// jū -> jǖ
if len(matches) == 3 && matches[1] != "" && matches[2] != "" {
v, _ := finalExceptionsMap[matches[2]]
return v
}
// ju -> jv, ju1 -> jv1
p = reFinal2Exceptions.ReplaceAllString(p, "${1}v$2")
return strings.Join(strings.SplitN(p, n, 2), "")
}
// 处理 y, w
func handleYW(p string) string {
// 特例 y/w
if strings.HasPrefix(p, "yu") {
p = "v" + p[2:] // yu -> v
} else if strings.HasPrefix(p, "yi") {
p = p[1:] // yi -> i
} else if strings.HasPrefix(p, "y") {
p = "i" + p[1:] // y -> i
} else if strings.HasPrefix(p, "wu") {
p = p[1:] // wu -> u
} else if strings.HasPrefix(p, "w") {
p = "u" + p[1:] // w -> u
}
return p
}
func toFixed(p string, a Args) string {
if a.Style == Initials {
return initial(p)
}
origP := p
// 替换拼音中的带声调字符
py := rePhoneticSymbol.ReplaceAllStringFunc(p, func(m string) string {
symbol, _ := phoneticSymbol[m]
switch a.Style {
// 不包含声调
case Normal, FirstLetter, Finals:
// 去掉声调: a1 -> a
m = reTone2.ReplaceAllString(symbol, "$1")
case Tone2, FinalsTone2, Tone3, FinalsTone3:
// 返回使用数字标识声调的字符
m = symbol
default:
// 声调在头上
}
return m
})
switch a.Style {
// 将声调移动到最后
case Tone3, FinalsTone3:
py = reTone3.ReplaceAllString(py, "$1$3$2")
}
switch a.Style {
// 首字母
case FirstLetter:
py = string([]rune(py)[0])
// 韵母
case Finals, FinalsTone, FinalsTone2, FinalsTone3:
// 转换为 []rune unicode 编码用于获取第一个拼音字符
// 因为 string 是 utf-8 编码不方便获取第一个拼音字符
rs := []rune(origP)
switch string(rs[0]) {
// 因为鼻音没有声母所以不需要去掉声母部分
case "ḿ", "ń", "ň", "ǹ":
default:
py = final(py)
}
}
return py
}
func applyStyle(p []string, a Args) []string {
newP := []string{}
for _, v := range p {
newP = append(newP, toFixed(v, a))
}
return newP
}
// SinglePinyin 把单个 `rune` 类型的汉字转换为拼音.
func SinglePinyin(r rune, a Args) []string {
if a.Fallback == nil {
a.Fallback = Fallback
}
value, ok := PinyinDict[int(r)]
pys := []string{}
if ok {
pys = strings.Split(value, ",")
} else {
pys = a.Fallback(r, a)
}
if len(pys) > 0 {
if !a.Heteronym {
pys = []string{pys[0]}
}
return applyStyle(pys, a)
}
return pys
}
// Pinyin 汉字转拼音,支持多音字模式.
func Pinyin(s string, a Args) [][]string {
pys := [][]string{}
for _, r := range s {
py := SinglePinyin(r, a)
if len(py) > 0 {
pys = append(pys, py)
}
}
return pys
}
// LazyPinyin 汉字转拼音,与 `Pinyin` 的区别是:
// 返回值类型不同,并且不支持多音字模式,每个汉字只取第一个音.
func LazyPinyin(s string, a Args) []string {
a.Heteronym = false
pys := []string{}
for _, v := range Pinyin(s, a) {
pys = append(pys, v[0])
}
return pys
}
// Slug join `LazyPinyin` 的返回值.
// 建议改用 https://github.com/mozillazg/go-slugify
func Slug(s string, a Args) string {
separator := a.Separator
return strings.Join(LazyPinyin(s, a), separator)
}
// Convert 跟 Pinyin 的唯一区别就是 a 参数可以是 nil
func Convert(s string, a *Args) [][]string {
if a == nil {
args := NewArgs()
a = &args
}
return Pinyin(s, *a)
}
// LazyConvert 跟 LazyPinyin 的唯一区别就是 a 参数可以是 nil
func LazyConvert(s string, a *Args) []string {
if a == nil {
args := NewArgs()
a = &args
}
return LazyPinyin(s, *a)
}

File diff suppressed because it is too large