Merge branch 'etcdv2' of https://github.com/fanyangyang/rainbond into etcdv2

凡羊羊 2020-02-04 14:54:20 +08:00
commit 660c831837
7 changed files with 765 additions and 5 deletions

View File

@@ -19,6 +19,7 @@
package build
import (
"context"
"fmt"
"regexp"
"strings"
@@ -26,6 +27,7 @@ import (
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/parser/code"
"github.com/goodrain/rainbond/event"
"k8s.io/client-go/kubernetes"
"github.com/docker/docker/client"
)
@@ -87,7 +89,19 @@ type Request struct {
BuildEnvs map[string]string
Logger event.Logger
DockerClient *client.Client
KubeClient kubernetes.Interface
ExtraHosts []string
HostAlias []HostAlias
Ctx context.Context
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
// IP address of the host file entry.
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
// Hostnames for the above IP address.
Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
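// For illustration (hypothetical values): a HostAlias{IP: "192.168.2.50",
// Hostnames: []string{"maven.goodrain.me", "lang.goodrain.me"}} entry is copied
// onto the build pod's spec by runBuildJob and rendered by the kubelet into the
// pod's /etc/hosts as:
//   192.168.2.50  maven.goodrain.me lang.goodrain.me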
//Commit Commit

View File

@@ -29,16 +29,25 @@ import (
"os/exec"
"path"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/fsnotify/fsnotify"
"github.com/pquerna/ffjson/ffjson"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/util"
"github.com/pquerna/ffjson/ffjson"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
)
func slugBuilder() (Build, error) {
@@ -58,7 +67,7 @@ func (s *slugBuild) Build(re *Request) (*Response, error) {
s.re = re
s.buildCacheDir = re.CacheDir
packageName := fmt.Sprintf("%s/%s.tgz", s.tgzDir, re.DeployVersion)
if err := s.runBuildContainer(re); err != nil {
if err := s.runBuildJob(re); err != nil {
re.Logger.Error(util.Translation("Compiling the source code failure"), map[string]string{"step": "build-code", "status": "failure"})
logrus.Error("build slug in container error,", err.Error())
return nil, err
@@ -231,6 +240,258 @@ func (s *slugBuild) getSourceCodeTarFile(re *Request) (*os.File, error) {
return os.OpenFile(sourceTarFile, os.O_RDONLY, 0755)
}
func (s *slugBuild) prepareSourceCodeFile(re *Request) error {
var cmd []string
switch re.ServerType {
case "svn":
cmd = append(cmd, "rm", "-rf", path.Join(re.SourceDir, ".svn"))
case "git":
cmd = append(cmd, "rm", "-rf", path.Join(re.SourceDir, ".git"))
default:
// nothing to clean for other server types; running exec.Command on an
// empty cmd slice would panic below
return nil
}
source := exec.Command(cmd[0], cmd[1:]...)
if err := source.Run(); err != nil {
return err
}
logrus.Debug("deleted .git/.svn folder successfully")
return nil
}
func (s *slugBuild) runBuildJob(re *Request) error {
ctx, cancel := context.WithCancel(re.Ctx)
defer cancel()
logrus.Info("start build job")
// delete .git and .svn folder
if err := s.prepareSourceCodeFile(re); err != nil {
logrus.Error("delete .git and .svn folder error")
return err
}
name := re.ServiceID
namespace := "rbd-system"
envs := []corev1.EnvVar{
corev1.EnvVar{Name: "SLUG_VERSION", Value: re.DeployVersion},
corev1.EnvVar{Name: "SERVICE_ID", Value: re.ServiceID},
corev1.EnvVar{Name: "TENANT_ID", Value: re.TenantID},
corev1.EnvVar{Name: "LANGUAGE", Value: re.Lang.String()},
corev1.EnvVar{Name: "DEBUG", Value: "true"},
}
for k, v := range re.BuildEnvs {
envs = append(envs, corev1.EnvVar{Name: k, Value: v})
if k == "PROC_ENV" {
var mapdata = make(map[string]interface{})
if err := json.Unmarshal([]byte(v), &mapdata); err == nil {
if runtime, ok := mapdata["runtimes"].(string); ok {
envs = append(envs, corev1.EnvVar{Name: "RUNTIME", Value: runtime})
}
}
}
}
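// PROC_ENV carries the detected language metadata as JSON (see the PROC_ENV
// value in the tests below); only its "runtimes" field is promoted to a
// dedicated RUNTIME env var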
job := batchv1.Job{}
job.Name = name
job.Namespace = namespace
podTempSpec := corev1.PodTemplateSpec{}
podTempSpec.Name = name
podTempSpec.Namespace = namespace
podSpec := corev1.PodSpec{RestartPolicy: corev1.RestartPolicyOnFailure} // only support never and onfailure
podSpec.Volumes = []corev1.Volume{
corev1.Volume{
Name: "slug",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "grdata",
},
},
},
corev1.Volume{
Name: "app",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "cache",
},
},
},
}
container := corev1.Container{Name: name, Image: builder.BUILDERIMAGENAME, Stdin: true, StdinOnce: true}
container.Env = envs
container.Args = []string{"local"}
slugSubPath := strings.TrimPrefix(re.TGZDir, "/grdata/")
logrus.Debugf("slug subpath is : %s", slugSubPath)
appSubPath := strings.TrimPrefix(re.SourceDir, "/cache/")
logrus.Debugf("app subpath is : %s", appSubPath)
cacheSubPath := strings.TrimPrefix(re.CacheDir, "/cache/")
container.VolumeMounts = []corev1.VolumeMount{
corev1.VolumeMount{
Name: "app",
MountPath: "/tmp/cache",
SubPath: cacheSubPath,
},
corev1.VolumeMount{
Name: "slug",
MountPath: "/tmp/slug",
SubPath: slugSubPath,
},
corev1.VolumeMount{
Name: "app",
MountPath: "/tmp/app",
SubPath: appSubPath,
},
}
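// mount layout: the cache PVC backs /tmp/cache (build cache, SubPath from
// CacheDir) and /tmp/app (source code, SubPath from SourceDir); the grdata PVC
// backs /tmp/slug (slug output, SubPath from TGZDir)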
podSpec.Containers = append(podSpec.Containers, container)
for _, ha := range re.HostAlias {
podSpec.HostAliases = append(podSpec.HostAliases, corev1.HostAlias{IP: ha.IP, Hostnames: ha.Hostnames})
}
podTempSpec.Spec = podSpec
job.Spec.Template = podTempSpec
_, err := re.KubeClient.BatchV1().Jobs(namespace).Create(&job)
if err != nil {
return err
}
defer deleteJob(re.KubeClient, namespace, job.Name)
// get job builder log and delete job until it is finished
writer := re.Logger.GetWriter("builder", "info")
podChan := make(chan struct{})
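// getJob signals on podChan once the job has an active pod; getJobPodLogs
// blocks on the same channel before attaching to the pod's log stream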
go getJobPodLogs(ctx, podChan, re.KubeClient, writer, namespace, job.Name)
getJob(ctx, podChan, re.KubeClient, namespace, job.Name)
return nil
}
func getJob(ctx context.Context, podChan chan struct{}, clientset kubernetes.Interface, namespace, name string) {
var job *batchv1.Job
jobWatch, err := clientset.BatchV1().Jobs(namespace).Watch(metav1.ListOptions{FieldSelector: fmt.Sprintf("metadata.name=%s", name)})
if err != nil {
logrus.Errorf("watch job %s error: %s", name, err.Error())
return
}
defer jobWatch.Stop()
for {
select {
case <-ctx.Done():
return
case event, ok := <-jobWatch.ResultChan():
if !ok {
logrus.Error("pod watch chan be closed")
return
}
switch event.Type {
case watch.Modified:
job, _ = event.Object.(*batchv1.Job)
if job.Status.Active > 0 {
logrus.Debug("pod is ready")
waitPod(ctx, podChan, clientset, namespace, name)
podChan <- struct{}{}
}
if job.Status.Succeeded > 0 || job.Status.Failed > 0 {
logrus.Debug("job is finished")
return
}
}
}
}
}
func waitPod(ctx context.Context, podChan chan struct{}, clientset kubernetes.Interface, namespace, name string) {
logrus.Debug("waiting pod")
var pod *corev1.Pod
labelSelector := fmt.Sprintf("job-name=%s", name)
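// the job controller labels its pods with job-name=<job name>, so this
// selector matches only the build job's pod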
podWatch, err := clientset.CoreV1().Pods(namespace).Watch(metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
logrus.Errorf("watch job pod error: %s", err.Error())
return
}
defer podWatch.Stop()
for {
select {
case <-ctx.Done():
return
case event, ok := <-podWatch.ResultChan():
if !ok {
logrus.Error("pod watch chan be closed")
return
}
switch event.Type {
case watch.Added, watch.Modified:
pod, _ = event.Object.(*corev1.Pod)
logrus.Debugf("pod status is : %s", pod.Status.Phase)
if len(pod.Status.ContainerStatuses) > 0 && pod.Status.ContainerStatuses[0].Ready {
logrus.Debug("pod is running")
return
}
}
}
}
}
func getJobPodLogs(ctx context.Context, podChan chan struct{}, clientset kubernetes.Interface, writer event.LoggerWriter, namespace, job string) {
once := sync.Once{}
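// podChan may be signaled more than once, but the log stream must only be
// attached a single time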
for {
select {
case <-ctx.Done():
return
case <-podChan:
once.Do(func() {
logrus.Debug("pod ready")
labelSelector := fmt.Sprintf("job-name=%s", job)
pods, err := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
logrus.Errorf("list job's pods error: %s", err.Error())
return
}
if len(pods.Items) == 0 {
logrus.Errorf("no pod found for job %s", job)
return
}
logrus.Debug("pod name is : ", pods.Items[0].Name)
podLogRequest := clientset.CoreV1().Pods(namespace).GetLogs(pods.Items[0].Name, &corev1.PodLogOptions{Follow: true})
reader, err := podLogRequest.Stream()
if err != nil {
logrus.Warnf("get build job pod log data error: %s, retry net loop", err.Error())
return
}
defer reader.Close()
bufReader := bufio.NewReader(reader)
for {
line, err := bufReader.ReadBytes('\n')
writer.Write(line)
if err == io.EOF {
logrus.Info("get job log finished(io.EOF)")
return
}
if err != nil {
logrus.Warningf("get job log error: %s", err.Error())
return
}
}
})
}
}
}
func deleteJob(clientset kubernetes.Interface, namespace, job string) {
logrus.Debugf("start delete job: %s", job)
listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", job)}
pods, err := clientset.CoreV1().Pods(namespace).List(listOptions)
if err != nil {
logrus.Errorf("get job's pod error: %s", err.Error())
return
}
logrus.Debugf("get pod len : %d", len(pods.Items))
if err := clientset.CoreV1().Pods(namespace).DeleteCollection(&metav1.DeleteOptions{}, listOptions); err != nil {
logrus.Errorf("delete job pod failed: %s", err.Error())
}
// delete job
if err := clientset.BatchV1().Jobs(namespace).Delete(job, &metav1.DeleteOptions{}); err != nil {
logrus.Errorf("delete job failed: %s", err.Error())
}
logrus.Debug("delete job finish")
}
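// note: on k8s >= 1.12, the TTLSecondsAfterFinished field exercised in
// TestDeleteJobAuto (see the test file below) lets the apiserver perform this
// cleanup automatically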
func (s *slugBuild) runBuildContainer(re *Request) error {
envs := []*sources.KeyValue{
&sources.KeyValue{Key: "SLUG_VERSION", Value: re.DeployVersion},

View File

@@ -0,0 +1,308 @@
package build
import (
"archive/tar"
"bufio"
"compress/gzip"
"context"
"fmt"
"io"
"os"
"strings"
"testing"
"time"
"github.com/goodrain/rainbond/builder/parser/code"
"github.com/goodrain/rainbond/cmd/builder/option"
"github.com/goodrain/rainbond/event"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
etcdutil "github.com/goodrain/rainbond/util/etcd"
k8sutil "github.com/goodrain/rainbond/util/k8s"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func TestCreateJob(t *testing.T) {
conf := option.Config{
EtcdEndPoints: []string{"192.168.2.203:2379"},
MQAPI: "192.168.2.203:6300",
EventLogServers: []string{"192.168.2.203:6366"},
RbdRepoName: "rbd-dns",
RbdNamespace: "rbd-system",
MysqlConnectionInfo: "EeM2oc:lee7OhQu@tcp(192.168.2.203:3306)/region",
}
event.NewManager(event.EventConfig{
EventLogServers: conf.EventLogServers,
DiscoverArgs: &etcdutil.ClientArgs{Endpoints: conf.EtcdEndPoints},
})
restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/remote/192.168.2.206/admin.kubeconfig")
if err != nil {
t.Fatal(err)
}
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
t.Fatal(err)
}
dockerClient, err := client.NewEnvClient()
if err != nil {
t.Fatal("new docker error: ", err.Error())
}
logger := event.GetManager().GetLogger("0000")
req := Request{
ServerType: "git",
DockerClient: dockerClient,
KubeClient: clientset,
ServiceID: "d9b8d718510dc53118af1e1219e36d3a",
DeployVersion: "123",
TenantID: "7c89455140284fd7b263038b44dc65bc",
Lang: code.JavaMaven,
Runtime: "1.8",
Logger: logger,
}
req.BuildEnvs = map[string]string{
"PROCFILE": "web: java $JAVA_OPTS -jar target/java-maven-demo-0.0.1.jar",
"PROC_ENV": `{"procfile": "", "dependencies": {}, "language": "Java-maven", "runtimes": "1.8"}`,
"RUNTIME": "1.8",
}
req.CacheDir = fmt.Sprintf("/cache/build/%s/cache/%s", req.TenantID, req.ServiceID)
req.TGZDir = fmt.Sprintf("/grdata/build/tenant/%s/slug/%s", req.TenantID, req.ServiceID)
req.SourceDir = fmt.Sprintf("/cache/source/build/%s/%s", req.TenantID, req.ServiceID)
sb := slugBuild{tgzDir: "string"}
if err := sb.runBuildJob(&req); err != nil {
t.Fatal(err)
}
fmt.Println("create job finished")
}
func Test1(t *testing.T) {
tarFile := "/opt/rainbond/pkg/rainbond-pkg-V5.2-dev.tgz"
srcFile, err := os.Open(tarFile)
if err != nil {
t.Fatal(err)
}
defer srcFile.Close()
gr, err := gzip.NewReader(srcFile) //handle gzip feature
if err != nil {
t.Fatal(err)
}
defer gr.Close()
tr := tar.NewReader(gr) // tar reader
now := time.Now()
for hdr, err := tr.Next(); err != io.EOF; hdr, err = tr.Next() { // next range tar info
if err != nil {
t.Fatal(err)
}
// read the file info from the tar header
fi := hdr.FileInfo()
if !strings.HasPrefix(fi.Name(), "._") && strings.HasSuffix(fi.Name(), ".tgz") {
t.Logf("name: %s, size: %d", fi.Name(), fi.Size())
}
}
t.Logf("cost: %d", time.Since(now))
}
func TestDockerClient(t *testing.T) {
dockerClient, err := client.NewEnvClient()
if err != nil {
t.Fatal("new docker error: ", err.Error())
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
containers, err := dockerClient.ContainerList(ctx, types.ContainerListOptions{})
if err != nil {
t.Fatal(err)
}
for _, container := range containers {
t.Log("container id : ", container.ID)
}
// images, err := dockerClient.ImageList(ctx, types.ImageListOptions{})
// for _, image := range images {
// t.Log("image is : ", image.ID)
// }
}
func TestGetPodLog(t *testing.T) {
restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/remote/192.168.2.206/admin.kubeconfig")
if err != nil {
t.Fatal(err)
}
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
t.Fatal(err)
}
re := Request{
KubeClient: clientset,
ServiceID: "aae30a8d6a66ea9024197bc8deecd137",
}
for {
fmt.Println("waiting job finish")
time.Sleep(5 * time.Second)
job, err := re.KubeClient.BatchV1().Jobs("rbd-system").Get(re.ServiceID, metav1.GetOptions{})
if err != nil {
fmt.Printf("get job error: %s", err.Error())
}
if job == nil {
continue
}
if job.Status.Active > 0 {
fmt.Println("build job start")
var po corev1.Pod
labelSelector := fmt.Sprintf("job-name=%s", re.ServiceID)
for {
pos, err := re.KubeClient.CoreV1().Pods("rbd-system").List(metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
fmt.Printf(" get po error: %s", err.Error())
}
if len(pos.Items) == 0 {
time.Sleep(5 * time.Second)
continue
}
if len(pos.Items[0].Spec.Containers) > 0 {
fmt.Println("pod container ready, start write log")
po = pos.Items[0]
break
}
time.Sleep(5 * time.Second)
}
podLogRequest := re.KubeClient.CoreV1().Pods("rbd-system").GetLogs(po.Name, &corev1.PodLogOptions{Follow: true})
reader, err := podLogRequest.Stream()
if err != nil {
fmt.Println("get build job pod log data error: ", err.Error())
continue
}
defer reader.Close()
bufReader := bufio.NewReader(reader)
for {
line, err := bufReader.ReadBytes('\n')
fmt.Println(string(line))
if err == io.EOF {
break
}
if err != nil {
fmt.Printf("get job log error: %s", err.Error())
break
}
}
}
if job.Status.Succeeded > 0 {
fmt.Println("build job have done successfully")
if err = re.KubeClient.BatchV1().Jobs("rbd-system").Delete(re.ServiceID, &metav1.DeleteOptions{}); err != nil {
fmt.Printf("delete job failed: %s", err.Error())
}
break
}
if job.Status.Failed > 0 {
fmt.Println("build job have done failed")
if err = re.KubeClient.BatchV1().Jobs("rbd-system").Delete(re.ServiceID, &metav1.DeleteOptions{}); err != nil {
fmt.Printf("delete job failed: %s", err.Error())
}
break
}
}
}
func TestDeleteJobAuto(t *testing.T) {
restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/remote/192.168.2.206/admin.kubeconfig")
if err != nil {
t.Fatal(err)
}
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
t.Fatal(err)
}
job := batchv1.Job{}
job.Name = "fanyangyang"
job.Namespace = "rbd-system"
var ttl int32 // ttl 0: delete the job as soon as it finishes; requires k8s >= 1.12 (TTLAfterFinished feature gate)
job.Spec = batchv1.JobSpec{
TTLSecondsAfterFinished: &ttl,
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
corev1.Container{
Name: "fanyangyang",
Image: "busybox",
Command: []string{"echo", "hello job"},
},
},
},
},
}
_, err = clientset.BatchV1().Jobs(job.Namespace).Create(&job)
if err != nil {
t.Fatal("create job error: ", err.Error())
}
for {
j, err := clientset.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
if err != nil {
t.Error("get job error: ", err.Error())
}
if j == nil {
time.Sleep(5 * time.Second)
continue
}
if j.Status.Active > 0 {
fmt.Println("job is running")
}
if j.Status.Succeeded > 0 {
fmt.Println("job is succeed, waiting auto delete")
break
}
if j.Status.Failed > 0 {
fmt.Println("job is failed, waiting next")
break
}
time.Sleep(5 * time.Second)
}
}
func TestDeleteJob(t *testing.T) {
podChan := make(chan struct{})
defer close(podChan)
conf := option.Config{
EtcdEndPoints: []string{"192.168.2.203:2379"},
MQAPI: "192.168.2.203:6300",
EventLogServers: []string{"192.168.2.203:6366"},
RbdRepoName: "rbd-dns",
RbdNamespace: "rbd-system",
MysqlConnectionInfo: "EeM2oc:lee7OhQu@tcp(192.168.2.203:3306)/region",
}
event.NewManager(event.EventConfig{
EventLogServers: conf.EventLogServers,
DiscoverArgs: &etcdutil.ClientArgs{Endpoints: conf.EtcdEndPoints},
})
restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/remote/192.168.2.206/admin.kubeconfig")
if err != nil {
t.Fatal(err)
}
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
t.Fatal(err)
}
name := "fanyangyang"
namespace := "rbd-system"
logger := event.GetManager().GetLogger("0000")
writer := logger.GetWriter("builder", "info")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go getJobPodLogs(ctx, podChan, clientset, writer, namespace, name)
getJob(ctx, podChan, clientset, namespace, name)
t.Log("done")
}

View File

@@ -70,6 +70,7 @@ type SourceCodeBuildItem struct {
RepoInfo *sources.RepostoryBuildInfo
commit Commit
Configs map[string]gjson.Result `json:"configs"`
Ctx context.Context
}
//Commit code Commit
@@ -200,6 +201,7 @@ func (i *SourceCodeBuildItem) Run(timeout time.Duration) error {
i.Lang = string(lang)
}
i.Logger.Info("pull code successfully", map[string]string{"step": "codee-version"})
res, err := i.codeBuild()
if err != nil {
if err.Error() == context.DeadlineExceeded.Error() {
@@ -222,13 +224,13 @@ func (i *SourceCodeBuildItem) codeBuild() (*build.Response, error) {
i.Logger.Error(util.Translation("No way of compiling to support this source type was found"), map[string]string{"step": "builder-exector", "status": "failure"})
return nil, err
}
extraHosts, err := i.getExtraHosts()
hostAlias, err := i.getHostAlias()
if err != nil {
i.Logger.Error(util.Translation("get rbd-repo ip failure"), map[string]string{"step": "builder-exector", "status": "failure"})
return nil, err
}
buildReq := &build.Request{
SourceDir: i.RepoInfo.GetCodeBuildAbsPath(),
SourceDir: i.RepoInfo.CodeHome,
CacheDir: i.CacheDir,
TGZDir: i.TGZDir,
RepositoryURL: i.RepoInfo.RepostoryURL,
@@ -244,7 +246,9 @@ func (i *SourceCodeBuildItem) codeBuild() (*build.Response, error) {
BuildEnvs: i.BuildEnvs,
Logger: i.Logger,
DockerClient: i.DockerClient,
ExtraHosts: extraHosts,
KubeClient: i.KubeClient,
HostAlias: hostAlias,
Ctx: i.Ctx,
}
res, err := codeBuild.Build(buildReq)
return res, err
@@ -253,6 +257,7 @@ func (i *SourceCodeBuildItem) codeBuild() (*build.Response, error) {
func (i *SourceCodeBuildItem) getExtraHosts() (extraHosts []string, err error) {
endpoints, err := i.KubeClient.CoreV1().Endpoints(i.RbdNamespace).Get(i.RbdRepoName, metav1.GetOptions{})
if err != nil {
logrus.Errorf("do not found ep by name: %s in namespace: %s", i.RbdRepoName, i.Namespace)
return nil, err
}
for _, subset := range endpoints.Subsets {
@@ -264,6 +269,21 @@ func (i *SourceCodeBuildItem) getExtraHosts() (extraHosts []string, err error) {
return
}
func (i *SourceCodeBuildItem) getHostAlias() (hostAliasList []build.HostAlias, err error) {
endpoints, err := i.KubeClient.CoreV1().Endpoints(i.RbdNamespace).Get(i.RbdRepoName, metav1.GetOptions{})
if err != nil {
logrus.Errorf("do not found ep by name: %s in namespace: %s", i.RbdRepoName, i.Namespace)
return nil, err
}
hostNames := []string{"maven.goodrain.me", "lang.goodrain.me"}
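// every rbd-repo endpoint address serves both build-time domains, so each
// address becomes one alias entry for maven.goodrain.me and lang.goodrain.me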
for _, subset := range endpoints.Subsets {
for _, addr := range subset.Addresses {
hostAliasList = append(hostAliasList, build.HostAlias{IP: addr.IP, Hostnames: hostNames})
}
}
return
}
//IsDockerfile CheckDockerfile
func (i *SourceCodeBuildItem) IsDockerfile() bool {
filepath := path.Join(i.RepoInfo.GetCodeBuildAbsPath(), "Dockerfile")

View File

@@ -318,6 +318,7 @@ func (e *exectorManager) buildFromSourceCode(task *pb.TaskMessage) {
i.KubeClient = e.KubeClient
i.RbdNamespace = e.cfg.RbdNamespace
i.RbdRepoName = e.cfg.RbdRepoName
i.Ctx = e.ctx
i.Logger.Info("Build app version from source code start", map[string]string{"step": "builder-exector", "status": "starting"})
start := time.Now()
defer event.GetManager().ReleaseLogger(i.Logger)

View File

@@ -0,0 +1,111 @@
// Copyright (C) 2014-2018 Goodrain Co., Ltd.
// RAINBOND, Application Management Platform
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. For any non-GPL usage of Rainbond,
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
// must be obtained first.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package exector
import (
"context"
"encoding/json"
"runtime"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/docker/docker/client"
"k8s.io/client-go/kubernetes"
"github.com/goodrain/rainbond/builder/parser/code"
"github.com/goodrain/rainbond/cmd/builder/option"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/mq/api/grpc/pb"
mqclient "github.com/goodrain/rainbond/mq/client"
etcdutil "github.com/goodrain/rainbond/util/etcd"
k8sutil "github.com/goodrain/rainbond/util/k8s"
)
func Test_exectorManager_buildFromSourceCode(t *testing.T) {
conf := option.Config{
EtcdEndPoints: []string{"192.168.2.203:2379"},
MQAPI: "192.168.2.203:6300",
EventLogServers: []string{"192.168.2.203:6366"},
RbdRepoName: "rbd-dns",
RbdNamespace: "rbd-system",
MysqlConnectionInfo: "EeM2oc:lee7OhQu@tcp(192.168.2.203:3306)/region",
}
etcdArgs := etcdutil.ClientArgs{Endpoints: conf.EtcdEndPoints}
event.NewManager(event.EventConfig{
EventLogServers: conf.EventLogServers,
DiscoverArgs: &etcdArgs,
})
restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/admin.kubeconfig")
if err != nil {
t.Fatal(err)
}
kubeClient, err := kubernetes.NewForConfig(restConfig)
if err != nil {
t.Fatal(err)
}
dockerClient, err := client.NewEnvClient()
if err != nil {
t.Fatal(err)
}
etcdCli, err := clientv3.New(clientv3.Config{
Endpoints: conf.EtcdEndPoints,
DialTimeout: 10 * time.Second,
})
if err != nil {
t.Fatal(err)
}
var maxConcurrentTask int
if conf.MaxTasks == 0 {
maxConcurrentTask = runtime.NumCPU() * 2
} else {
maxConcurrentTask = conf.MaxTasks
}
mqClient, err := mqclient.NewMqClient(&etcdArgs, conf.MQAPI)
if err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithCancel(context.Background())
e := &exectorManager{
DockerClient: dockerClient,
KubeClient: kubeClient,
EtcdCli: etcdCli,
tasks: make(chan *pb.TaskMessage, maxConcurrentTask),
maxConcurrentTask: maxConcurrentTask,
mqClient: mqClient,
ctx: ctx,
cancel: cancel,
cfg: conf,
}
taskBodym := make(map[string]interface{})
taskBodym["repo_url"] = "https://github.com/goodrain/java-maven-demo.git"
taskBodym["branch"] = "master"
taskBodym["tenant_id"] = "5d7bd886e6dc4425bb6c2ac5fc9fa593"
taskBodym["service_id"] = "4eaa41ccf145b8e43a6aeb1a5efeab53"
taskBodym["deploy_version"] = "20200115193617"
taskBodym["lang"] = code.JavaMaven
taskBodym["event_id"] = "0000"
taskBodym["envs"] = map[string]string{}
taskBody, _ := json.Marshal(taskBodym)
task := pb.TaskMessage{
TaskType: "build_from_source_code",
TaskBody: taskBody,
}
i := NewSouceCodeBuildItem(task.TaskBody)
if err := i.Run(30 * time.Second); err != nil {
t.Fatal(err)
}
e.buildFromSourceCode(&task)
}

View File

@@ -19,6 +19,7 @@
package parser
import (
"encoding/json"
"fmt"
"testing"
@@ -294,3 +295,47 @@ func TestDockerCompose30Parse(t *testing.T) {
}
fmt.Printf("ServiceInfo:%+v \n", p.GetServiceInfo())
}
var fanyy = `
version: "2"
services:
  DOClever:
    image: lw96/doclever
    restart: always
    container_name: "DOClever"
    ports:
    - 10000:10000
    volumes:
    - /root/doclever/data/file:/root/DOClever/data/file
    - /root/doclever/data/img:/root/DOClever/data/img
    - /root/doclever/data/tmp:/root/DOClever/data/tmp
    environment:
    - DB_HOST=mongodb://mongo:27017/DOClever
    - PORT=10000
    links:
    - mongo:mongo
  mongo:
    image: mongo:latest
    restart: always
    container_name: "mongodb"
    ports:
    - 27017:27017
    volumes:
    - /root/doclever/data/db:/data/db
`
func TestDockerComposefanyy(t *testing.T) {
dockerclient, err := client.NewEnvClient()
if err != nil {
t.Fatal(err)
}
p := CreateDockerComposeParse(fanyy, dockerclient, "", "", event.GetTestLogger())
if err := p.Parse(); err != nil {
t.Fatal(err)
}
svsInfos := p.GetServiceInfo()
ss, _ := json.Marshal(svsInfos)
fmt.Printf("ServiceInfo:%+v \n", string(ss))
}