Mirror of https://gitee.com/rainbond/Rainbond.git, synced 2024-11-29 18:27:58 +08:00

Refactor the flow control of the source code build.

parent 9fded4ef34
commit f27b647868
@@ -23,31 +23,26 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/client"
-	"github.com/fsnotify/fsnotify"
-	"github.com/goodrain/rainbond/builder"
-	"github.com/goodrain/rainbond/builder/sources"
-	"github.com/goodrain/rainbond/event"
-	"github.com/goodrain/rainbond/util"
-	"github.com/pquerna/ffjson/ffjson"
 	"io"
 	"io/ioutil"
 	"os"
 	"os/exec"
 	"path"
 	"strings"
-	"sync"
 	"time"

-	batchv1 "k8s.io/api/batch/v1"
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+	"github.com/fsnotify/fsnotify"
+	"github.com/goodrain/rainbond/builder"
+	jobc "github.com/goodrain/rainbond/builder/job"
+	"github.com/goodrain/rainbond/builder/sources"
+	"github.com/goodrain/rainbond/event"
+	"github.com/goodrain/rainbond/util"
+	"github.com/pquerna/ffjson/ffjson"
 	corev1 "k8s.io/api/core/v1"
-	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/kubernetes"
 )

 func slugBuilder() (Build, error) {
@@ -67,17 +62,22 @@ func (s *slugBuild) Build(re *Request) (*Response, error) {
 	s.re = re
 	s.buildCacheDir = re.CacheDir
 	packageName := fmt.Sprintf("%s/%s.tgz", s.tgzDir, re.DeployVersion)
+	//Stops previous build tasks for the same component
+	//If an error occurs, it does not affect the current build task
+	if err := s.stopPreBuildJob(re); err != nil {
+		logrus.Errorf("stop pre build job for service %s failure %s", re.ServiceID, err.Error())
+	}
 	if err := s.runBuildJob(re); err != nil {
 		re.Logger.Error(util.Translation("Compiling the source code failure"), map[string]string{"step": "build-code", "status": "failure"})
 		logrus.Error("build slug in container error,", err.Error())
 		return nil, err
 	}
+	re.Logger.Info("code build success", map[string]string{"step": "build-exector"})
 	defer func() {
-		if err := os.Remove(packageName); err != nil {
+		if err := os.Remove(packageName); err != nil && !strings.Contains(err.Error(), "no such file or directory") {
 			logrus.Warningf("pkg name: %s; remove slug pkg: %v", packageName, err)
 		}
 	}()

 	fileInfo, err := os.Stat(packageName)
 	if err != nil {
 		re.Logger.Error(util.Translation("Check that the build result failure"), map[string]string{"step": "build-code", "status": "failure"})
@@ -96,7 +96,7 @@ func (s *slugBuild) Build(re *Request) (*Response, error) {
 			map[string]string{"step": "build-code", "status": "failure"})
 		return nil, fmt.Errorf("build runner image failure")
 	}
-	re.Logger.Info(util.Translation("Compiling the source code SUCCESS"), map[string]string{"step": "build-code", "status": "success"})
+	re.Logger.Info(util.Translation("build runtime image success"), map[string]string{"step": "build-code", "status": "success"})
 	res := &Response{
 		MediumType: ImageMediumType,
 		MediumPath: imageName,
@@ -222,7 +222,7 @@ func (s *slugBuild) readLogFile(logfile string, logger event.Logger, closed chan
 	}
 }

-func (s *slugBuild) getSourceCodeTarFile(re *Request) (*os.File, error) {
+func (s *slugBuild) getSourceCodeTarFile(re *Request) (string, error) {
 	var cmd []string
 	sourceTarFile := fmt.Sprintf("%s/%s-%s.tar", util.GetParentDirectory(re.SourceDir), re.ServiceID, re.DeployVersion)
 	if re.ServerType == "svn" {
@@ -235,37 +235,41 @@ func (s *slugBuild) getSourceCodeTarFile(re *Request) (*os.File, error) {
 	source.Dir = re.SourceDir
 	logrus.Debugf("tar source code to file %s", sourceTarFile)
 	if err := source.Run(); err != nil {
-		return nil, err
+		return "", err
 	}
-	return os.OpenFile(sourceTarFile, os.O_RDONLY, 0755)
+	return sourceTarFile, nil
 }

-func (s *slugBuild) prepareSourceCodeFile(re *Request) error {
-	var cmd []string
-	if re.ServerType == "svn" {
-		cmd = append(cmd, "rm", "-rf", path.Join(re.SourceDir, "./.svn"))
-	}
-	if re.ServerType == "git" {
-		cmd = append(cmd, "rm", "-rf", path.Join(re.SourceDir, "./.git"))
-	}
-	source := exec.Command(cmd[0], cmd[1:]...)
-	if err := source.Run(); err != nil {
-		return err
-	}
-	logrus.Debug("delete .git and .svn folder success")
+//stopPreBuildJob Stops previous build tasks for the same component
+//The same component retains only one build task to perform
+func (s *slugBuild) stopPreBuildJob(re *Request) error {
+	jobList, err := jobc.GetJobController().GetServiceJobs(re.ServiceID)
+	if err != nil {
+		logrus.Errorf("get pre build job for service %s failure ,%s", re.ServiceID, err.Error())
+	}
+	if jobList != nil && len(jobList) > 0 {
+		for _, job := range jobList {
+			jobc.GetJobController().DeleteJob(job.Name)
+		}
+	}
 	return nil
 }

 func (s *slugBuild) runBuildJob(re *Request) error {
-	ctx, cancel := context.WithCancel(re.Ctx)
-	defer cancel()
-	logrus.Info("start build job")
-	// delete .git and .svn folder
-	if err := s.prepareSourceCodeFile(re); err != nil {
-		logrus.Error("delete .git and .svn folder error")
-		return err
+	//prepare build code dir
+	re.Logger.Info(util.Translation("Start make code package"), map[string]string{"step": "build-exector"})
+	start := time.Now()
+	sourceTarFileName, err := s.getSourceCodeTarFile(re)
+	if err != nil {
+		return fmt.Errorf("create source code tar file error:%s", err.Error())
 	}
-	name := fmt.Sprintf("%s-%s", re.ServiceID, re.Commit.Hash[0:7])
+	re.Logger.Info(util.Translation("make code package success"), map[string]string{"step": "build-exector"})
+	logrus.Infof("package code for building service %s version %s successful, take time %s", re.ServiceID, re.DeployVersion, time.Now().Sub(start))
+	// remove source cache tar file
+	defer func() {
+		os.Remove(sourceTarFileName)
+	}()
+	name := fmt.Sprintf("%s-%s", re.ServiceID, re.DeployVersion)
 	namespace := "rbd-system"
 	envs := []corev1.EnvVar{
 		corev1.EnvVar{Name: "SLUG_VERSION", Value: re.DeployVersion},
@@ -284,13 +288,16 @@ func (s *slugBuild) runBuildJob(re *Request) error {
 			}
 		}
 	}
-	job := batchv1.Job{}
-	job.Name = name
-	job.Namespace = namespace
-	podTempSpec := corev1.PodTemplateSpec{}
-	podTempSpec.Name = name
-	podTempSpec.Namespace = namespace
+	job := corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels: map[string]string{
+				"service": re.ServiceID,
+				"job":     "codebuild",
+			},
+		},
+	}
 	podSpec := corev1.PodSpec{RestartPolicy: corev1.RestartPolicyOnFailure} // only support never and onfailure
 	podSpec.Volumes = []corev1.Volume{
 		corev1.Volume{
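The "service" and "job" labels attached to the build pod above are what the rest of this commit keys on: the new job controller in builder/job/job.go filters its informer on job=codebuild, and stopPreBuildJob finds a component's running builds through the service label. A minimal sketch of that lookup, assuming a configured kubernetes.Interface client; the function and variable names here are illustrative, not part of the commit:

    package example

    import (
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // listBuildPods lists the build pods of one component by the same
    // labels that runBuildJob sets above.
    func listBuildPods(clientset kubernetes.Interface, serviceID string) {
    	pods, err := clientset.CoreV1().Pods("rbd-system").List(metav1.ListOptions{
    		LabelSelector: "job=codebuild,service=" + serviceID,
    	})
    	if err != nil {
    		fmt.Println("list build pods failure:", err)
    		return
    	}
    	for _, pod := range pods.Items {
    		fmt.Println(pod.Name, pod.Status.Phase)
    	}
    }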
@@ -314,9 +321,7 @@ func (s *slugBuild) runBuildJob(re *Request) error {
 	container.Env = envs
 	container.Args = []string{"local"}
 	slugSubPath := strings.TrimPrefix(re.TGZDir, "/grdata/")
-	logrus.Debugf("slug subpath is : %s", slugSubPath)
-	appSubPath := strings.TrimPrefix(re.SourceDir, "/cache/")
-	logrus.Debugf("app subpath is : %s", appSubPath)
+	sourceTarPath := strings.TrimPrefix(sourceTarFileName, "/cache/")
 	cacheSubPath := strings.TrimPrefix(re.CacheDir, "/cache/")
 	container.VolumeMounts = []corev1.VolumeMount{
 		corev1.VolumeMount{
@@ -331,238 +336,69 @@ func (s *slugBuild) runBuildJob(re *Request) error {
 		},
 		corev1.VolumeMount{
 			Name:      "app",
-			MountPath: "/tmp/app",
-			SubPath:   appSubPath,
+			MountPath: "/tmp/app-source.tar",
+			SubPath:   sourceTarPath,
 		},
 	}
 	podSpec.Containers = append(podSpec.Containers, container)
 	for _, ha := range re.HostAlias {
 		podSpec.HostAliases = append(podSpec.HostAliases, corev1.HostAlias{IP: ha.IP, Hostnames: ha.Hostnames})
 	}
-	podTempSpec.Spec = podSpec
-	job.Spec.Template = podTempSpec
-
-	_, err := re.KubeClient.BatchV1().Jobs(namespace).Create(&job)
-	if err != nil {
-		if !k8sErrors.IsAlreadyExists(err) {
-			logrus.Errorf("create new job:%s failed: %s", name, err.Error())
-			return err
-		}
-		_, err := re.KubeClient.BatchV1().Jobs(namespace).Get(job.Name, metav1.GetOptions{})
-		if err != nil {
-			logrus.Errorf("get old job:%s failed : %s", name, err.Error())
-			return err
-		}
-
-		waitChan := make(chan struct{})
-		// if get old job, must clean it before re create a new one
-		go waitOldJobDeleted(ctx, waitChan, re.KubeClient, namespace, name)
-
-		var gracePeriod int64 = 0
-		if err := re.KubeClient.BatchV1().Jobs(namespace).Delete(job.Name, &metav1.DeleteOptions{
-			GracePeriodSeconds: &gracePeriod,
-		}); err != nil {
-			logrus.Errorf("get old job:%s failed: %s", name, err.Error())
-			return err
-		}
-
-		<-waitChan
-		logrus.Infof("old job has beed cleaned, create new job: %s", job.Name)
-
-		if _, err := re.KubeClient.BatchV1().Jobs(namespace).Create(&job); err != nil {
-			logrus.Errorf("create new job:%s failed: %s", name, err.Error())
-			return err
-		}
-	}
-
-	defer delete(re.KubeClient, namespace, job.Name)
-
-	// get job builder log and delete job util it is finished
-	writer := re.Logger.GetWriter("builder", "info")
-	podChan := make(chan struct{})
-	go getJobPodLogs(ctx, podChan, re.KubeClient, writer, namespace, job.Name)
-	getJob(ctx, podChan, re.KubeClient, namespace, job.Name)
-	return nil
-}
-
-func waitOldJobDeleted(ctx context.Context, waitChan chan struct{}, clientset kubernetes.Interface, namespace, name string) {
-	labelSelector := fmt.Sprintf("job-name=%s", name)
-	jobWatch, err := clientset.BatchV1().Jobs(namespace).Watch(metav1.ListOptions{LabelSelector: labelSelector})
-	if err != nil {
-		logrus.Errorf("watch job: %s failed: %s", name, err.Error())
-		return
-	}
-	for {
-		select {
-		case <-time.After(30 * time.Second):
-			logrus.Warnf("wait old job[%s] cleaned time out", name)
-			waitChan <- struct{}{}
-			return
-		case <-ctx.Done():
-			return
-		case evt, ok := <-jobWatch.ResultChan():
-			if !ok {
-				logrus.Error("old job watch chan be closed")
-				return
-			}
-			switch evt.Type {
-			case watch.Deleted:
-				logrus.Infof("old job deleted : %s", name)
-				waitChan <- struct{}{}
-				return
-			}
-		}
-	}
-}
-
-func getJob(ctx context.Context, podChan chan struct{}, clientset kubernetes.Interface, namespace, name string) {
-	var job *batchv1.Job
-	labelSelector := fmt.Sprintf("job-name=%s", name)
-	jobWatch, err := clientset.BatchV1().Jobs(namespace).Watch(metav1.ListOptions{LabelSelector: labelSelector})
-	if err != nil {
-		logrus.Errorf("watch job: %s failed: %s", name, err.Error())
-		return
-	}
-
-	once := sync.Once{}
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case evt, ok := <-jobWatch.ResultChan():
-			if !ok {
-				logrus.Error("job watch chan be closed")
-				return
-			}
-			switch evt.Type {
-			case watch.Modified, watch.Added:
-				job, _ = evt.Object.(*batchv1.Job)
-				if job.Name == name {
-					logrus.Debugf("job: %s status is: %+v ", name, job.Status)
-					// active means this job has bound a pod, can't ensure this pod's status is running or creating or initing or some status else
-					if job.Status.Active > 0 {
-						once.Do(func() {
-							logrus.Debug("job is ready")
-							waitPod(ctx, podChan, clientset, namespace, name)
-							podChan <- struct{}{}
-						})
-					}
-					if job.Status.Succeeded > 0 || job.Status.Failed > 0 {
-						logrus.Debug("job is finished")
-						return
-					}
-				}
-			case watch.Error:
-				logrus.Errorf("job: %s error", name)
-				return
-			case watch.Deleted:
-				logrus.Infof("job deleted : %s", name)
-				return
-			}
-		}
-	}
-}
-
-func waitPod(ctx context.Context, podChan chan struct{}, clientset kubernetes.Interface, namespace, name string) {
-	logrus.Debug("waiting pod")
-	var pod *corev1.Pod
-	labelSelector := fmt.Sprintf("job-name=%s", name)
-	podWatch, err := clientset.CoreV1().Pods(namespace).Watch(metav1.ListOptions{LabelSelector: labelSelector})
-	if err != nil {
-		return
-	}
-
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case evt, ok := <-podWatch.ResultChan():
-			if !ok {
-				logrus.Error("pod watch chan be closed")
-				return
-			}
-			switch evt.Type {
-			case watch.Added, watch.Modified:
-				pod, _ = evt.Object.(*corev1.Pod)
-				logrus.Debugf("pod status is : %s", pod.Status.Phase)
-				if len(pod.Status.ContainerStatuses) > 0 && pod.Status.ContainerStatuses[0].Ready {
-					logrus.Debug("pod is running")
-					return
-				}
-			case watch.Deleted:
-				logrus.Infof("pod : %s deleted", name)
-				return
-			case watch.Error:
-				logrus.Errorf("pod : %s error", name)
-				return
-			}
-		}
-	}
-}
-
-func getJobPodLogs(ctx context.Context, podChan chan struct{}, clientset kubernetes.Interface, writer event.LoggerWriter, namespace, job string) {
-	once := sync.Once{}
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-podChan:
-			once.Do(func() {
-				logrus.Debug("pod ready")
-				labelSelector := fmt.Sprintf("job-name=%s", job)
-				pods, err := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: labelSelector})
-				if err != nil {
-					logrus.Errorf("do not found job's pod, %s", err.Error())
-					return
-				}
-				logrus.Debug("pod name is : ", pods.Items[0].Name)
-				podLogRequest := clientset.CoreV1().Pods(namespace).GetLogs(pods.Items[0].Name, &corev1.PodLogOptions{Follow: true})
-				reader, err := podLogRequest.Stream()
-				if err != nil {
-					logrus.Warnf("get build job pod log data error: %s, retry net loop", err.Error())
-					return
-				}
-				defer reader.Close()
-				bufReader := bufio.NewReader(reader)
-				for {
-					line, err := bufReader.ReadBytes('\n')
-					writer.Write(line)
-					if err == io.EOF {
-						logrus.Info("get job log finished(io.EOF)")
-						return
-					}
-					if err != nil {
-						logrus.Warningf("get job log error: %s", err.Error())
-						return
-					}
-				}
-			})
-		}
-	}
-}
-
-func delete(clientset kubernetes.Interface, namespace, job string) {
-	logrus.Debugf("start delete job: %s", job)
-	listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", job)}
-
-	if err := clientset.CoreV1().Pods(namespace).DeleteCollection(&metav1.DeleteOptions{}, listOptions); err != nil {
-		logrus.Errorf("delete job pod failed: %s", err.Error())
-	}
-
-	// delete job
-	if err := clientset.BatchV1().Jobs(namespace).Delete(job, &metav1.DeleteOptions{}); err != nil {
-		logrus.Errorf("delete job failed: %s", err.Error())
-	}
-
-	logrus.Debug("delete job finish")
-}
+	job.Spec = podSpec
+	writer := re.Logger.GetWriter("builder", "info")
+	reChan := make(chan string, 2)
+	err = jobc.GetJobController().ExecJob(&job, writer, reChan)
+	if err != nil {
+		logrus.Errorf("create new job:%s failed: %s", name, err.Error())
+		return err
+	}
+	re.Logger.Info(util.Translation("create build code job success"), map[string]string{"step": "build-exector"})
+	logrus.Infof("create build job %s for service %s build version %s", job.Name, re.ServiceID, re.DeployVersion)
+	// delete job after complete
+	defer jobc.GetJobController().DeleteJob(job.Name)
+	return s.waitingComplete(re, reChan)
+}
+
+func (s *slugBuild) waitingComplete(re *Request, reChan chan string) (err error) {
+	var logComplete = false
+	var jobComplete = false
+	timeout := time.NewTimer(time.Millisecond * 60)
+	for {
+		select {
+		case <-timeout.C:
+			return fmt.Errorf("build time out")
+		case jobStatus := <-reChan:
+			switch jobStatus {
+			case "complete":
+				jobComplete = true
+				if logComplete {
+					return nil
+				}
+				re.Logger.Info(util.Translation("build code job exec completed"), map[string]string{"step": "build-exector"})
+			case "failed":
+				jobComplete = true
+				err = fmt.Errorf("build code job exec failure")
+				if logComplete {
+					return err
+				}
+				re.Logger.Info(util.Translation("build code job exec failed"), map[string]string{"step": "build-exector"})
+			case "cancel":
+				jobComplete = true
+				err = fmt.Errorf("build code job is canceled")
+				if logComplete {
+					return err
+				}
+			case "logcomplete":
+				logComplete = true
+				if jobComplete {
+					return err
+				}
+			}
+		}
+	}
+}
+
+//runBuildContainer The deprecated

 func (s *slugBuild) runBuildContainer(re *Request) error {
 	envs := []*sources.KeyValue{
 		&sources.KeyValue{Key: "SLUG_VERSION", Value: re.DeployVersion},
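runBuildJob now blocks in waitingComplete, multiplexing two producers over the single buffered reChan: the job controller's informer callbacks report the pod's terminal state ("complete", "failed", or "cancel"), and the log streamer reports "logcomplete"; the function only returns once both sides have reported. A runnable sketch of that protocol in isolation, with the producers simulated and all names illustrative:

    package main

    import "fmt"

    func main() {
    	// Buffered to 2, as in runBuildJob above, so neither producer
    	// blocks while the consumer handles the other event.
    	reChan := make(chan string, 2)
    	go func() { reChan <- "logcomplete" }() // log streamer finished
    	go func() { reChan <- "complete" }()    // or "failed" / "cancel"
    	var jobDone, logDone bool
    	for !jobDone || !logDone {
    		switch status := <-reChan; status {
    		case "complete", "failed", "cancel":
    			jobDone = true
    			fmt.Println("job status:", status)
    		case "logcomplete":
    			logDone = true
    		}
    	}
    }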
@@ -612,7 +448,11 @@ func (s *slugBuild) runBuildContainer(re *Request) error {
 		Args:       []string{"local"},
 		ExtraHosts: re.ExtraHosts,
 	}
-	reader, err := s.getSourceCodeTarFile(re)
+	sourceTarFileName, err := s.getSourceCodeTarFile(re)
+	if err != nil {
+		return fmt.Errorf("create source code tar file error:%s", err.Error())
+	}
+	reader, err := os.OpenFile(sourceTarFileName, os.O_RDONLY, 0755)
 	if err != nil {
 		return fmt.Errorf("create source code tar file error:%s", err.Error())
 	}
@@ -2,13 +2,10 @@ package build

 import (
 	"archive/tar"
-	"bufio"
 	"compress/gzip"
 	"context"
 	"fmt"
-	"github.com/Sirupsen/logrus"
 	"io"
-	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 	"os"
 	"strings"
 	"testing"
@@ -24,10 +21,6 @@ import (
 	etcdutil "github.com/goodrain/rainbond/util/etcd"
 	k8sutil "github.com/goodrain/rainbond/util/k8s"
-
-	batchv1 "k8s.io/api/batch/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"k8s.io/client-go/kubernetes"
 )
@@ -133,251 +126,3 @@ func TestDockerClient(t *testing.T) {
 	// t.Log("image is : ", image.ID)
 	// }
 }
-
-func TestGetPogLog(t *testing.T) {
-	restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/remote/192.168.2.206/admin.kubeconfig")
-	if err != nil {
-		t.Fatal(err)
-	}
-	clientset, err := kubernetes.NewForConfig(restConfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-	re := Request{
-		KubeClient: clientset,
-		ServiceID:  "aae30a8d6a66ea9024197bc8deecd137",
-	}
-
-	for {
-		fmt.Println("waiting job finish")
-		time.Sleep(5 * time.Second)
-		job, err := re.KubeClient.BatchV1().Jobs("rbd-system").Get(re.ServiceID, metav1.GetOptions{})
-		if err != nil {
-			fmt.Printf("get job error: %s", err.Error())
-		}
-		if job == nil {
-			continue
-		}
-		if job.Status.Active > 0 {
-			fmt.Println("build job start")
-			var po corev1.Pod
-			labelSelector := fmt.Sprintf("job-name=%s", re.ServiceID)
-			for {
-				pos, err := re.KubeClient.CoreV1().Pods("rbd-system").List(metav1.ListOptions{LabelSelector: labelSelector})
-				if err != nil {
-					fmt.Printf(" get po error: %s", err.Error())
-				}
-				if len(pos.Items) == 0 {
-					continue
-				}
-				if len(pos.Items[0].Spec.Containers) > 0 {
-					fmt.Println("pod container ready, start write log")
-					po = pos.Items[0]
-					break
-				}
-				time.Sleep(5 * time.Second)
-			}
-			podLogRequest := re.KubeClient.CoreV1().Pods("rbd-system").GetLogs(po.Name, &corev1.PodLogOptions{Follow: true})
-			reader, err := podLogRequest.Stream()
-			if err != nil {
-				fmt.Println("get build job pod log data error: ", err.Error())
-				continue
-			}
-			defer reader.Close()
-			bufReader := bufio.NewReader(reader)
-			for {
-				line, err := bufReader.ReadBytes('\n')
-				fmt.Println(string(line))
-				if err == io.EOF {
-					break
-				}
-				if err != nil {
-					fmt.Printf("get job log error: %s", err.Error())
-					break
-				}
-			}
-		}
-		if job.Status.Succeeded > 0 {
-			fmt.Println("build job have done successfully")
-			if err = re.KubeClient.BatchV1().Jobs("rbd-system").Delete(re.ServiceID, &metav1.DeleteOptions{}); err != nil {
-				fmt.Printf("delete job failed: %s", err.Error())
-			}
-			break
-		}
-		if job.Status.Failed > 0 {
-			fmt.Println("build job have done failed")
-			if err = re.KubeClient.BatchV1().Jobs("rbd-system").Delete(re.ServiceID, &metav1.DeleteOptions{}); err != nil {
-				fmt.Printf("delete job failed: %s", err.Error())
-			}
-			break
-		}
-	}
-}
-
-func TestDeleteJobAuto(t *testing.T) {
-	restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/remote/192.168.2.206/admin.kubeconfig")
-	if err != nil {
-		t.Fatal(err)
-	}
-	clientset, err := kubernetes.NewForConfig(restConfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-	job := batchv1.Job{}
-	job.Name = "fanyangyang"
-	job.Namespace = "rbd-system"
-
-	var ttl int32
-	ttl = 0
-	job.Spec.TTLSecondsAfterFinished = &ttl // k8s version >= 1.12
-	job.Spec = batchv1.JobSpec{
-		TTLSecondsAfterFinished: &ttl,
-		Template: corev1.PodTemplateSpec{
-			Spec: corev1.PodSpec{
-				RestartPolicy: corev1.RestartPolicyNever,
-				Containers: []corev1.Container{
-					corev1.Container{
-						Name:    "fanyangyang",
-						Image:   "busybox",
-						Command: []string{"echo", "hello job"},
-					},
-				},
-			},
-		},
-	}
-
-	_, err = clientset.BatchV1().Jobs(job.Namespace).Create(&job)
-	if err != nil {
-		t.Fatal("create job error: ", err.Error())
-	}
-
-	for {
-		j, err := clientset.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{})
-		if err != nil {
-			t.Error("get job error: ", err.Error())
-		}
-		if j == nil {
-			continue
-		}
-		if j.Status.Active > 0 {
-			fmt.Println("job is running")
-		}
-		if j.Status.Succeeded > 0 {
-			fmt.Println("job is succeed, waiting auto delete")
-			break
-		}
-		if j.Status.Failed > 0 {
-			fmt.Println("job is failed, waiting next")
-			break
-		}
-		time.Sleep(5 * time.Second)
-	}
-}
-
-func TestDeleteJob(t *testing.T) {
-	podChan := make(chan struct{})
-	defer close(podChan)
-	conf := option.Config{
-		EtcdEndPoints:       []string{"192.168.2.203:2379"},
-		MQAPI:               "192.168.2.203:6300",
-		EventLogServers:     []string{"192.168.2.203:6366"},
-		RbdRepoName:         "rbd-dns",
-		RbdNamespace:        "rbd-system",
-		MysqlConnectionInfo: "EeM2oc:lee7OhQu@tcp(192.168.2.203:3306)/region",
-	}
-	event.NewManager(event.EventConfig{
-		EventLogServers: conf.EventLogServers,
-		DiscoverArgs:    &etcdutil.ClientArgs{Endpoints: conf.EtcdEndPoints},
-	})
-	restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/remote/192.168.2.206/admin.kubeconfig")
-	if err != nil {
-		t.Fatal(err)
-	}
-	clientset, err := kubernetes.NewForConfig(restConfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-	name := "fanyangyang"
-	namespace := "rbd-system"
-	logger := event.GetManager().GetLogger("0000")
-	writer := logger.GetWriter("builder", "info")
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	go getJobPodLogs(ctx, podChan, clientset, writer, namespace, name)
-	getJob(ctx, podChan, clientset, namespace, name)
-	t.Log("done")
-}
-
-func TestDeleteOldJobFirst(t *testing.T) {
-	restConfig, err := k8sutil.NewRestConfig("/Users/fanyangyang/Documents/company/goodrain/local/10.211.55.4.kubeconfig")
-	if err != nil {
-		t.Fatal(err)
-	}
-	clientset, err := kubernetes.NewForConfig(restConfig)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	name := "fanyangyang"
-	namespace := "rbd-system"
-
-	job := batchv1.Job{}
-	job.Name = name
-	job.Namespace = namespace
-
-	var ttl int32
-	ttl = 0
-	job.Spec.TTLSecondsAfterFinished = &ttl // k8s version >= 1.12
-	job.Spec = batchv1.JobSpec{
-		TTLSecondsAfterFinished: &ttl,
-		Template: corev1.PodTemplateSpec{
-			Spec: corev1.PodSpec{
-				RestartPolicy: corev1.RestartPolicyNever,
-				Containers: []corev1.Container{
-					corev1.Container{
-						Name:    name,
-						Image:   "busybox",
-						Command: []string{"echo", "hello job"},
-					},
-				},
-			},
-		},
-	}
-
-	_, err = clientset.BatchV1().Jobs(namespace).Create(&job)
-	if err != nil {
-		if !k8sErrors.IsAlreadyExists(err) {
-			fmt.Printf("create new job:%s failed: %s \n", name, err.Error())
-			t.Fatal(err)
-		}
-		old, err := clientset.BatchV1().Jobs(namespace).Get(job.Name, metav1.GetOptions{})
-		if err != nil {
-			fmt.Printf("get old job:%s failed : %s \n", name, err.Error())
-			t.Fatal(err)
-		}
-		// if get old job, must clean it before re create a new one
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
-		go waitOldJobDeleted(ctx, clientset, namespace, name)
-		var gracePeriod int64 = 0
-		propagationPolicy := metav1.DeletePropagationBackground
-		if err := clientset.BatchV1().Jobs(namespace).Delete(job.Name, &metav1.DeleteOptions{
-			GracePeriodSeconds: &gracePeriod,
-			Preconditions: &metav1.Preconditions{
-				UID:             &old.UID,
-				ResourceVersion: &old.ResourceVersion,
-			},
-			PropagationPolicy: &propagationPolicy,
-		}); err != nil {
-			fmt.Printf("get old job:%s failed: %s \n", name, err.Error())
-			t.Fatal(err)
-		}
-		logrus.Info("wait old job clean")
-
-		if _, err := clientset.BatchV1().Jobs(namespace).Create(&job); err != nil {
-			fmt.Printf("create new job:%s failed: %s\n", name, err.Error())
-			t.Fatal(err)
-		}
-	}
-}
@@ -21,12 +21,13 @@ package exector
 import (
 	"context"
 	"fmt"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"os"
 	"path"
 	"strings"
 	"time"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/client"
 	"github.com/goodrain/rainbond/builder"
@@ -181,6 +182,7 @@ func (i *SourceCodeBuildItem) Run(timeout time.Duration) error {
 			Message: commit.Message,
 		}
 	}
+	// clean cache code
 	defer func() {
 		if err := os.RemoveAll(rbi.GetCodeHome()); err != nil {
 			logrus.Warningf("remove source code: %v", err)
@@ -203,7 +205,7 @@ func (i *SourceCodeBuildItem) Run(timeout time.Duration) error {
 		i.Lang = string(lang)
 	}

-	i.Logger.Info("pull code successfully", map[string]string{"step": "codee-version"})
+	i.Logger.Info("pull or clone code successfully, start code build", map[string]string{"step": "codee-version"})
 	res, err := i.codeBuild()
 	if err != nil {
 		if err.Error() == context.DeadlineExceeded.Error() {
@@ -21,7 +21,6 @@ package exector
 import (
 	"context"
 	"fmt"
-	"runtime"
 	"runtime/debug"
 	"sync"
 	"time"
@@ -35,6 +34,7 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/tidwall/gjson"

+	"github.com/goodrain/rainbond/builder/job"
 	"github.com/goodrain/rainbond/cmd/builder/option"
 	"github.com/goodrain/rainbond/db"
 	"github.com/goodrain/rainbond/event"
@@ -95,15 +95,20 @@ func NewManager(conf option.Config, mqc mqclient.MQClient) (Manager, error) {
 	ctx, cancel := context.WithCancel(context.Background())
 	etcdCli, err := etcdutil.NewClient(ctx, etcdClientArgs)
 	if err != nil {
+		cancel()
 		return nil, err
 	}
 	var maxConcurrentTask int
 	if conf.MaxTasks == 0 {
-		maxConcurrentTask = runtime.NumCPU() * 2
+		maxConcurrentTask = 50
 	} else {
 		maxConcurrentTask = conf.MaxTasks
 	}
+	stop := make(chan struct{})
+	if err := job.InitJobController(stop, kubeClient); err != nil {
+		cancel()
+		return nil, err
+	}
 	logrus.Infof("The maximum number of concurrent build tasks supported by the current node is %d", maxConcurrentTask)
 	return &exectorManager{
 		DockerClient: dockerClient,
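InitJobController blocks until its pod informer has synced (see Start in builder/job/job.go below), so by the time NewManager returns, lookups through job.GetJobController() read from a warm cache. A sketch of that sync guard on its own; cache.WaitForCacheSync is the stock client-go helper that expresses the same wait the controller's Start implements with a sleep loop (the package and function names here are illustrative):

    package example

    import "k8s.io/client-go/tools/cache"

    // waitSynced runs an informer and returns once its local cache has
    // caught up with the API server (or the stop channel closes).
    func waitSynced(stop chan struct{}, informer cache.SharedIndexInformer) bool {
    	go informer.Run(stop)
    	return cache.WaitForCacheSync(stop, informer.HasSynced)
    }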
File diff suppressed because one or more lines are too long

builder/job/job.go (new file, 196 lines)
@ -0,0 +1,196 @@
+// RAINBOND, Application Management Platform
+// Copyright (C) 2014-2017 Goodrain Co., Ltd.
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version. For any non-GPL usage of Rainbond,
+// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
+// must be obtained first.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package job
+
+import (
+	"bufio"
+	"context"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	corev1 "k8s.io/api/core/v1"
+	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/client-go/informers"
+	v1 "k8s.io/client-go/informers/core/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/cache"
+)
+
+//Controller build job controller
+type Controller interface {
+	ExecJob(job *corev1.Pod, logger io.Writer, result chan string) error
+	GetJob(string) (*corev1.Pod, error)
+	GetServiceJobs(serviceID string) ([]*corev1.Pod, error)
+	DeleteJob(job string)
+}
+type controller struct {
+	KubeClient   kubernetes.Interface
+	ctx          context.Context
+	jobInformer  v1.PodInformer
+	namespace    string
+	subJobStatus map[string]chan string
+	lock         sync.Mutex
+}
+
+var jobController *controller
+
+//InitJobController init job controller
+func InitJobController(stop chan struct{}, kubeClient kubernetes.Interface) error {
+	jobController = &controller{
+		KubeClient:   kubeClient,
+		namespace:    "rbd-system",
+		subJobStatus: make(map[string]chan string),
+	}
+	eventHandler := cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			job, _ := obj.(*corev1.Pod)
+			logrus.Infof("[Watch] Build job pod %s created", job.Name)
+		},
+		DeleteFunc: func(obj interface{}) {
+			job, _ := obj.(*corev1.Pod)
+			jobController.lock.Lock()
+			defer jobController.lock.Unlock()
+			if ch, exist := jobController.subJobStatus[job.Name]; exist {
+				ch <- "cancel"
+			}
+			logrus.Infof("[Watch] Build job pod %s deleted", job.Name)
+		},
+		UpdateFunc: func(old, cur interface{}) {
+			jobController.lock.Lock()
+			defer jobController.lock.Unlock()
+			job, _ := cur.(*corev1.Pod)
+			if len(job.Status.ContainerStatuses) > 0 {
+				buildContainer := job.Status.ContainerStatuses[0]
+				terminated := buildContainer.State.Terminated
+				if terminated != nil && terminated.ExitCode == 0 {
+					if ch, exist := jobController.subJobStatus[job.Name]; exist {
+						logrus.Infof("job %s container exit 0 and complete", job.Name)
+						ch <- "complete"
+					}
+				}
+				if terminated != nil && terminated.ExitCode > 0 {
+					if ch, exist := jobController.subJobStatus[job.Name]; exist {
+						ch <- "failed"
+					}
+				}
+				logrus.Infof("job %s container %s state %+v", job.Name, buildContainer.Name, buildContainer.State)
+			}
+		},
+	}
+	infFactory := informers.NewFilteredSharedInformerFactory(kubeClient, time.Second*3, jobController.namespace,
+		func(options *metav1.ListOptions) {
+			options.LabelSelector = "job=codebuild"
+		})
+	jobController.jobInformer = infFactory.Core().V1().Pods()
+	jobController.jobInformer.Informer().AddEventHandlerWithResyncPeriod(eventHandler, time.Second*10)
+	return jobController.Start(stop)
+}
+
+//GetJobController get job controller
+func GetJobController() Controller {
+	return jobController
+}
+
+func (c *controller) GetJob(name string) (*corev1.Pod, error) {
+	return c.jobInformer.Lister().Pods(c.namespace).Get(name)
+}
+
+func (c *controller) GetServiceJobs(serviceID string) ([]*corev1.Pod, error) {
+	s, err := labels.Parse("service=" + serviceID)
+	if err != nil {
+		return nil, err
+	}
+	jobs, err := c.jobInformer.Lister().Pods(c.namespace).List(s)
+	if err != nil {
+		return nil, err
+	}
+	return jobs, nil
+}
+
+func (c *controller) ExecJob(job *corev1.Pod, logger io.Writer, result chan string) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if j, _ := c.GetJob(job.Name); j != nil {
+		go c.getLogger(job.Name, logger, result)
+		c.subJobStatus[job.Name] = result
+		return nil
+	}
+	_, err := c.KubeClient.CoreV1().Pods(c.namespace).Create(job)
+	if err != nil {
+		return err
+	}
+	go c.getLogger(job.Name, logger, result)
+	c.subJobStatus[job.Name] = result
+	return nil
+}
+
+func (c *controller) Start(stop chan struct{}) error {
+	go c.jobInformer.Informer().Run(stop)
+	for !c.jobInformer.Informer().HasSynced() {
+		time.Sleep(time.Millisecond * 500)
+	}
+	return nil
+}
+
+func (c *controller) getLogger(job string, writer io.Writer, result chan string) {
+	defer func() {
+		result <- "logcomplete"
+	}()
+	for {
+		podLogRequest := c.KubeClient.CoreV1().Pods(c.namespace).GetLogs(job, &corev1.PodLogOptions{Follow: true})
+		reader, err := podLogRequest.Stream()
+		if err != nil {
+			logrus.Warnf("get build job pod log data error: %s, retry net loop", err.Error())
+			time.Sleep(time.Second * 3)
+			continue
+		}
+		defer reader.Close()
+		bufReader := bufio.NewReader(reader)
+		for {
+			line, err := bufReader.ReadBytes('\n')
+			if err == io.EOF {
+				return
+			}
+			if err != nil {
+				logrus.Warningf("get job log error: %s", err.Error())
+				return
+			}
+			writer.Write(line)
+		}
+	}
+}
+
+func (c *controller) DeleteJob(job string) {
+	namespace := c.namespace
+	logrus.Debugf("start delete job: %s", job)
+	// delete job
+	if err := c.KubeClient.CoreV1().Pods(namespace).Delete(job, &metav1.DeleteOptions{}); err != nil {
+		if !k8sErrors.IsNotFound(err) {
+			logrus.Errorf("delete job failed: %s", err.Error())
+		}
+	}
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	delete(c.subJobStatus, job)
+	logrus.Infof("delete job %s finish", job)
+}
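The controller above is used in a create-then-subscribe pattern: ExecJob creates the build pod (or attaches to an existing one), registers the caller's result channel, and starts the log streamer; DeleteJob tears the pod down and drops the subscription. A self-contained usage sketch under the commit's own API; the kubeconfig handling and pod spec here are hypothetical, and the labels must match the informer filter (job=codebuild) for status events to be delivered:

    package main

    import (
    	"os"

    	"github.com/Sirupsen/logrus"
    	"github.com/goodrain/rainbond/builder/job"
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
    	if err != nil {
    		logrus.Fatal(err)
    	}
    	clientset, err := kubernetes.NewForConfig(config)
    	if err != nil {
    		logrus.Fatal(err)
    	}
    	stop := make(chan struct{})
    	defer close(stop)
    	if err := job.InitJobController(stop, clientset); err != nil {
    		logrus.Fatal(err)
    	}
    	buildPod := &corev1.Pod{
    		ObjectMeta: metav1.ObjectMeta{
    			Name:      "example-build",
    			Namespace: "rbd-system",
    			Labels:    map[string]string{"service": "example", "job": "codebuild"},
    		},
    		Spec: corev1.PodSpec{
    			RestartPolicy: corev1.RestartPolicyOnFailure,
    			Containers: []corev1.Container{
    				{Name: "build", Image: "busybox", Command: []string{"echo", "done"}},
    			},
    		},
    	}
    	result := make(chan string, 2)
    	if err := job.GetJobController().ExecJob(buildPod, os.Stdout, result); err != nil {
    		logrus.Fatal(err)
    	}
    	defer job.GetJobController().DeleteJob(buildPod.Name)
    	// "logcomplete" only means the log stream ended; wait for a
    	// terminal pod status before tearing down.
    	for status := range result {
    		if status == "complete" || status == "failed" || status == "cancel" {
    			logrus.Infof("build finished with status %s", status)
    			return
    		}
    	}
    }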
@@ -106,10 +106,10 @@ func (d *SourceCodeParse) Parse() ParseErrorList {
 		return d.errors
 	}
 	gitFunc := func() ParseErrorList {
-		//获取代码
-		if sources.CheckFileExist(buildInfo.GetCodeHome()) {
+		//get code
+		if !util.DirIsEmpty(buildInfo.GetCodeHome()) {
 			if err := sources.RemoveDir(buildInfo.GetCodeHome()); err != nil {
-				//d.errappend(ErrorAndSolve(err, "清理cache dir错误", "请提交代码到仓库"))
+				logrus.Errorf("remove code dir failure %s", err.Error())
 				return d.errors
 			}
 		}
@@ -22,11 +22,14 @@ import (
 	"testing"

 	"github.com/goodrain/rainbond/event"
+	etcdutil "github.com/goodrain/rainbond/util/etcd"
 )

 func init() {
 	event.NewManager(event.EventConfig{
-		DiscoverAddress: []string{"127.0.0.1:2379"},
+		DiscoverArgs: &etcdutil.ClientArgs{
+			Endpoints: []string{"127.0.0.1:2379"},
+		},
 	})
 }
 func TestCopyFileWithProgress(t *testing.T) {
@@ -113,6 +113,9 @@ func getShowURL(rurl string) string {
 //GitClone git clone code
 func GitClone(csi CodeSourceInfo, sourceDir string, logger event.Logger, timeout int) (*git.Repository, error) {
 	GetPrivateFileParam := csi.TenantID
+	if !strings.HasSuffix(csi.RepositoryURL, ".git") {
+		csi.RepositoryURL = csi.RepositoryURL + ".git"
+	}
 	flag := true
 Loop:
 	if logger != nil {
|
|||||||
Progress: writer,
|
Progress: writer,
|
||||||
SingleBranch: true,
|
SingleBranch: true,
|
||||||
Tags: git.NoTags,
|
Tags: git.NoTags,
|
||||||
RecurseSubmodules: git.NoRecurseSubmodules,
|
RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
|
||||||
Depth: 1,
|
Depth: 1,
|
||||||
}
|
}
|
||||||
if csi.Branch != "" {
|
if csi.Branch != "" {
|
||||||
|
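The switch from git.NoRecurseSubmodules to git.DefaultSubmoduleRecursionDepth means shallow clones now also fetch submodules. A minimal sketch with the same clone options, assuming the src-d go-git v4 API (gopkg.in/src-d/go-git.v4); the import path and function name are assumptions, not stated in this hunk:

    package example

    import (
    	"os"

    	git "gopkg.in/src-d/go-git.v4"
    )

    // cloneShallow performs a shallow, single-branch clone that also
    // recurses into submodules, matching the options above.
    func cloneShallow(url, dir string) (*git.Repository, error) {
    	return git.PlainClone(dir, false, &git.CloneOptions{
    		URL:               url,
    		Progress:          os.Stdout,
    		SingleBranch:      true,
    		Tags:              git.NoTags,
    		RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
    		Depth:             1,
    	})
    }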
@@ -24,11 +24,14 @@ import (
 	"time"

 	"github.com/goodrain/rainbond/event"
+	etcdutil "github.com/goodrain/rainbond/util/etcd"
 )

 func init() {
 	event.NewManager(event.EventConfig{
-		DiscoverAddress: []string{"172.17.0.1:2379"},
+		DiscoverArgs: &etcdutil.ClientArgs{
+			Endpoints: []string{"127.0.0.1:2379"},
+		},
 	})
 }
 func TestGitClone(t *testing.T) {
@@ -49,8 +52,8 @@ func TestGitClone(t *testing.T) {
 func TestGitCloneByTag(t *testing.T) {
 	start := time.Now()
 	csi := CodeSourceInfo{
-		RepositoryURL: "https://github.com/goodrain/rainbond-install.git",
-		Branch:        "tag:v3.5.1",
+		RepositoryURL: "https://github.com/goodrain/rainbond-ui.git",
+		Branch:        "master",
 	}
 	//logger := event.GetManager().GetLogger("system")
 	res, err := GitClone(csi, "/tmp/rainbonddoc4", event.GetTestLogger(), 1)
@@ -22,11 +22,14 @@ import (
 	"testing"

 	"github.com/goodrain/rainbond/event"
+	etcdutil "github.com/goodrain/rainbond/util/etcd"
 )

 func init() {
 	event.NewManager(event.EventConfig{
-		DiscoverAddress: []string{"127.0.0.1:2379"},
+		DiscoverArgs: &etcdutil.ClientArgs{
+			Endpoints: []string{"127.0.0.1:2379"},
+		},
 	})
 }
 func TestPushFile(t *testing.T) {
@@ -81,7 +81,7 @@ func (a *Builder) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&a.MysqlConnectionInfo, "mysql", "root:admin@tcp(127.0.0.1:3306)/region", "mysql db connection info")
 	fs.StringSliceVar(&a.EventLogServers, "event-servers", []string{"127.0.0.1:6366"}, "event log server address. simple lb")
 	fs.StringVar(&a.KubeConfig, "kube-config", "", "kubernetes api server config file")
-	fs.IntVar(&a.MaxTasks, "max-tasks", 0, "Maximum number of simultaneous build tasks,If set to 0, the maximum limit is twice the number of CPU cores")
+	fs.IntVar(&a.MaxTasks, "max-tasks", 50, "Maximum number of simultaneous build tasks")
 	fs.IntVar(&a.APIPort, "api-port", 3228, "the port for api server")
 	fs.StringVar(&a.MQAPI, "mq-api", "127.0.0.1:6300", "acp_mq api")
 	fs.StringVar(&a.RunMode, "run", "sync", "sync data when worker start")
@@ -140,6 +140,10 @@ func (r *readEventBarrel) insertMessage(message *db.EventLogMessage) {
 func (r *readEventBarrel) pushCashMessage(ch chan *db.EventLogMessage, subID string) {
 	r.subLock.Lock()
 	defer r.subLock.Unlock()
+	//send cache message
+	for _, m := range r.barrel {
+		ch <- m
+	}
 	r.subSocketChan[subID] = ch
 }
@@ -38,30 +38,60 @@ import (
 //NewSourceBuildCmd cmd for source build test
 func NewSourceBuildCmd() cli.Command {
 	c := cli.Command{
-		Name:  "buildtest",
-		Usage: "build test source code, If it can be build, you can build in rainbond",
-		Flags: []cli.Flag{
-			cli.StringFlag{
-				Name:  "dir",
-				Usage: "source code dir,default is current dir.",
-				Value: "",
-			},
-			cli.StringFlag{
-				Name:  "lang",
-				Usage: "source code lang type, if not specified, will automatic identify",
-				Value: "",
-			},
-			cli.StringFlag{
-				Name:  "image",
-				Usage: "builder image name",
-				Value: builder.BUILDERIMAGENAME,
-			},
-			cli.StringSliceFlag{
-				Name:  "env",
-				Usage: "Build the required environment variables",
-			},
-		},
-		Action: build,
+		Subcommands: []cli.Command{
+			cli.Command{
+				Name:  "test",
+				Usage: "build test source code, If it can be build, you can build in rainbond",
+				Flags: []cli.Flag{
+					cli.StringFlag{
+						Name:  "dir",
+						Usage: "source code dir,default is current dir.",
+						Value: "",
+					},
+					cli.StringFlag{
+						Name:  "lang",
+						Usage: "source code lang type, if not specified, will automatic identify",
+						Value: "",
+					},
+					cli.StringFlag{
+						Name:  "image",
+						Usage: "builder image name",
+						Value: builder.BUILDERIMAGENAME,
+					},
+					cli.StringSliceFlag{
+						Name:  "env",
+						Usage: "Build the required environment variables",
+					},
+				},
+				Action: build,
+			},
+			cli.Command{
+				Name:  "list",
+				Usage: "Lists the building tasks pod currently being performed",
+				Action: func(ctx *cli.Context) {
+					cmd := exec.Command("kubectl", "get", "pod", "-l", "job=codebuild", "-o", "wide", "-n", "rbd-system")
+					cmd.Stdout = os.Stdout
+					cmd.Stderr = os.Stderr
+					cmd.Run()
+				},
+			},
+			cli.Command{
+				Name:  "log",
+				Usage: "Displays a log of the build task",
+				Action: func(ctx *cli.Context) {
+					name := ctx.Args().First()
+					if name == "" {
+						showError("Please specify the task pod name")
+					}
+					cmd := exec.Command("kubectl", "logs", "-f", name, "-n", "rbd-system")
+					cmd.Stdout = os.Stdout
+					cmd.Stderr = os.Stderr
+					cmd.Run()
+				},
+			},
+		},
+		Name:  "build",
+		Usage: "Commands related to building source code",
 	}
 	return c
 }
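With the command tree above, the source-build tooling becomes a set of subcommands (assuming this is wired into the grctl binary, which the hunk does not name explicitly):

    grctl build test --dir ./myapp --lang golang --env KEY=VALUE
    grctl build list
    grctl build log <build-pod-name>

The list and log subcommands shell out to kubectl against the rbd-system namespace, so they require kubectl on the PATH and a kubeconfig that can reach the build pods.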
@@ -17,13 +17,16 @@ import (
 )

 var pemDirPath = ".rbd/ssl"
-var clientPemPath = path.Join(pemDirPath, "client.pem")
-var clientKeyPemPath = path.Join(pemDirPath, "client.key.pem")
-var clientCAPemPath = path.Join(pemDirPath, "ca.pem")
+var clientPemPath string
+var clientKeyPemPath string
+var clientCAPemPath string

 func init() {
 	homePath, _ := sources.Home()
 	pemDirPath = path.Join(homePath, pemDirPath)
+	clientPemPath = path.Join(pemDirPath, "client.pem")
+	clientKeyPemPath = path.Join(pemDirPath, "client.key.pem")
+	clientCAPemPath = path.Join(pemDirPath, "ca.pem")
 }

 //NewCmdInstall -