merge from V5.2

凡羊羊 2020-02-24 12:53:03 +08:00
commit 4e4ac133bf
282 changed files with 27766 additions and 892 deletions

View File

@ -59,6 +59,13 @@ func (v2 *V2) Routes() chi.Router {
r.Post("/volume-options", controller.VolumeSetVar)
r.Delete("/volume-options/{volume_type}", controller.DeleteVolumeType)
r.Put("/volume-options/{volume_type}", controller.UpdateVolumeType)
r.Mount("/enterprise/{enterprise_id}", v2.enterpriseRouter())
return r
}
func (v2 *V2) enterpriseRouter() chi.Router {
r := chi.NewRouter()
r.Get("/running-services", controller.GetRunningServices)
return r
}

View File

@ -0,0 +1,15 @@
package controller
import (
"github.com/go-chi/chi"
"github.com/goodrain/rainbond/api/handler"
httputil "github.com/goodrain/rainbond/util/http"
"net/http"
)
//GetRunningServices list all running service ids
func GetRunningServices(w http.ResponseWriter, r *http.Request) {
enterpriseID := chi.URLParam(r, "enterprise_id")
runningList := handler.GetServiceManager().GetEnterpriseRunningServices(enterpriseID)
httputil.ReturnNoFomart(r, w, 200, map[string]interface{}{"service_ids": runningList})
}
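A minimal client-side sketch of the new endpoint, assuming the API listens on a hypothetical http://127.0.0.1:8888 and that ReturnNoFomart writes the map above as plain JSON; the enterprise ID below is a placeholder:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// hypothetical API address and enterprise ID
	resp, err := http.Get("http://127.0.0.1:8888/v2/enterprise/example-enterprise-id/running-services")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var body struct {
		ServiceIDs []string `json:"service_ids"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Println("running services:", body.ServiceIDs)
}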

View File

@ -663,6 +663,7 @@ func (t *TenantStruct) CreateService(w http.ResponseWriter, r *http.Request) {
tenantID := r.Context().Value(middleware.ContextKey("tenant_id")).(string)
ss.TenantID = tenantID
ss.ServiceType = ss.ExtendMethod
if err := handler.GetServiceManager().ServiceCreate(&ss); err != nil {
if strings.Contains(err.Error(), "is exist in tenant") {
httputil.ReturnError(r, w, 400, fmt.Sprintf("create service error, %v", err))
@ -696,7 +697,7 @@ func (t *TenantStruct) UpdateService(w http.ResponseWriter, r *http.Request) {
// schema:
// "$ref": "#/responses/commandResponse"
// description: unified response format
// TODO fanyangyang: support updating the component type
logrus.Debugf("trans update service")
//currently only the following fields can be updated
rules := validator.MapData{
@ -704,6 +705,7 @@ func (t *TenantStruct) UpdateService(w http.ResponseWriter, r *http.Request) {
"image_name": []string{},
"container_memory": []string{},
"service_name": []string{},
"extend_method": []string{},
}
data, ok := httputil.ValidatorRequestMapAndErrorResponse(r, w, rules, nil)
if !ok {

View File

@ -174,7 +174,7 @@ func dbInit() error {
if err == gorm.ErrRecordNotFound {
data := map[string]string{
"/v2/show": "server_source",
"/v2/opentsdb": "server_source",
"/v2/cluster": "server_source",
"/v2/resources": "server_source",
"/v2/builder": "server_source",
"/v2/tenants": "server_source",
@ -187,9 +187,6 @@ func dbInit() error {
"/v2/gateway/ports": "server_source",
"/v2/nodes": "node_manager",
"/v2/job": "node_manager",
"/v2/tasks": "node_manager",
"/v2/taskgroups": "node_manager",
"/v2/tasktemps": "node_manager",
"/v2/configs": "node_manager",
}
tx := begin

View File

@ -230,11 +230,11 @@ func (h *BackupHandle) snapshot(ids []string, sourceDir string) error {
ServiceID: id,
}
status := h.statusCli.GetStatus(id)
serviceType, err := db.GetManager().TenantServiceLabelDao().GetTenantServiceTypeLabel(id)
serviceInfo, err := db.GetManager().TenantServiceDao().GetServiceTypeById(id)
if err != nil {
return fmt.Errorf("Get service deploy type error,%s", err.Error())
}
if status != v1.CLOSED && serviceType != nil && serviceType.LabelValue == core_util.StatefulServiceType {
if status != v1.CLOSED && serviceInfo != nil && serviceInfo.IsState() { // TODO fanyangyang: decide whether backup is supported based on the component type
return fmt.Errorf("stateful service %s must be closed before backup", id)
}
data.ServiceStatus = status

View File

@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"strconv"
@ -30,26 +31,29 @@ import (
"github.com/Sirupsen/logrus"
"github.com/coreos/etcd/clientv3"
api_model "github.com/goodrain/rainbond/api/model"
"github.com/jinzhu/gorm"
"github.com/pquerna/ffjson/ffjson"
"github.com/twinj/uuid"
"github.com/goodrain/rainbond/api/proxy"
"github.com/goodrain/rainbond/api/util"
"github.com/goodrain/rainbond/builder/parser"
"github.com/goodrain/rainbond/cmd/api/option"
"github.com/goodrain/rainbond/db"
dberrors "github.com/goodrain/rainbond/db/errors"
core_model "github.com/goodrain/rainbond/db/model"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
eventutil "github.com/goodrain/rainbond/eventlog/util"
gclient "github.com/goodrain/rainbond/mq/client"
core_util "github.com/goodrain/rainbond/util"
"github.com/goodrain/rainbond/worker/client"
"github.com/goodrain/rainbond/worker/discover/model"
"github.com/goodrain/rainbond/worker/server"
"github.com/goodrain/rainbond/worker/server/pb"
"github.com/jinzhu/gorm"
"github.com/pquerna/ffjson/ffjson"
"github.com/twinj/uuid"
api_model "github.com/goodrain/rainbond/api/model"
dberrors "github.com/goodrain/rainbond/db/errors"
core_model "github.com/goodrain/rainbond/db/model"
dbmodel "github.com/goodrain/rainbond/db/model"
eventutil "github.com/goodrain/rainbond/eventlog/util"
gclient "github.com/goodrain/rainbond/mq/client"
core_util "github.com/goodrain/rainbond/util"
typesv1 "github.com/goodrain/rainbond/worker/appm/types/v1"
)
// ErrServiceNotClosed -
@ -249,17 +253,12 @@ func (s *ServiceAction) AddLabel(l *api_model.LabelsStruct, serviceID string) er
tx.Rollback()
}
}()
//V5.2: the service-type label is no longer supported
for _, label := range l.Labels {
var labelModel dbmodel.TenantServiceLable
switch label.LabelKey {
case core_model.LabelKeyServiceType:
labelModel.ServiceID = serviceID
labelModel.LabelKey = core_model.LabelKeyServiceType
labelModel.LabelValue = chekeServiceLabel(label.LabelValue)
default:
labelModel.ServiceID = serviceID
labelModel.LabelKey = label.LabelKey
labelModel.LabelValue = label.LabelValue
labelModel := dbmodel.TenantServiceLable{
ServiceID: serviceID,
LabelKey: label.LabelKey,
LabelValue: label.LabelValue,
}
if err := db.GetManager().TenantServiceLabelDaoTransactions(tx).AddModel(&labelModel); err != nil {
tx.Rollback()
@ -291,17 +290,12 @@ func (s *ServiceAction) UpdateLabel(l *api_model.LabelsStruct, serviceID string)
tx.Rollback()
return err
}
// V5.2: the service-type label is no longer supported
// add new labels
var labelModel dbmodel.TenantServiceLable
switch label.LabelKey {
case core_model.LabelKeyServiceType:
labelModel.ServiceID = serviceID
labelModel.LabelKey = core_model.LabelKeyServiceType
labelModel.LabelValue = chekeServiceLabel(label.LabelValue)
default:
labelModel.ServiceID = serviceID
labelModel.LabelKey = label.LabelKey
labelModel.LabelValue = label.LabelValue
labelModel := dbmodel.TenantServiceLable{
ServiceID: serviceID,
LabelKey: label.LabelKey,
LabelValue: label.LabelValue,
}
if err := db.GetManager().TenantServiceLabelDaoTransactions(tx).AddModel(&labelModel); err != nil {
logrus.Errorf("error adding new labels: %v", err)
@ -342,24 +336,6 @@ func (s *ServiceAction) DeleteLabel(l *api_model.LabelsStruct, serviceID string)
return nil
}
//UpdateServiceLabel UpdateLabel
func (s *ServiceAction) UpdateServiceLabel(serviceID, value string) error {
sls, err := db.GetManager().TenantServiceLabelDao().GetTenantServiceLabel(serviceID)
if err != nil {
return err
}
if len(sls) > 0 {
for _, sl := range sls {
sl.ServiceID = serviceID
sl.LabelKey = core_model.LabelKeyServiceType
value = chekeServiceLabel(value)
sl.LabelValue = value
return db.GetManager().TenantServiceLabelDao().UpdateModel(sl)
}
}
return fmt.Errorf("Get tenant service label error")
}
//StartStopService start service
func (s *ServiceAction) StartStopService(sss *api_model.StartStopStruct) error {
services, err := db.GetManager().TenantServiceDao().GetServiceByID(sss.ServiceID)
@ -636,14 +612,16 @@ func (s *ServiceAction) ServiceCreate(sc *api_model.ServiceStruct) error {
}
}
//set app label
if err := db.GetManager().TenantServiceLabelDaoTransactions(tx).AddModel(&dbmodel.TenantServiceLable{
ServiceID: ts.ServiceID,
LabelKey: core_model.LabelKeyServiceType,
LabelValue: sc.ServiceLabel,
}); err != nil {
logrus.Errorf("add label %v error, %v", ts.ServiceID, err)
tx.Rollback()
return err
if sc.OSType == "windows" {
if err := db.GetManager().TenantServiceLabelDaoTransactions(tx).AddModel(&dbmodel.TenantServiceLable{
ServiceID: ts.ServiceID,
LabelKey: core_model.LabelKeyNodeSelector,
LabelValue: sc.OSType,
}); err != nil {
logrus.Errorf("add label %s=%s %v error, %v", core_model.LabelKeyNodeSelector, sc.OSType, ts.ServiceID, err)
tx.Rollback()
return err
}
}
// sc.Endpoints can't be nil
// sc.Endpoints.Discovery or sc.Endpoints.Static can't be nil
@ -740,15 +718,43 @@ func (s *ServiceAction) ServiceUpdate(sc map[string]interface{}) error {
if sc["service_name"] != nil {
ts.ServiceName = sc["service_name"].(string)
}
if sc["extend_method"] != nil {
extendMethod := sc["extend_method"].(string)
if ts.Replicas > 1 && dbmodel.ServiceType(extendMethod).IsSingleton() {
err := fmt.Errorf("service[%s] replicas > 1, can't change service typ to stateless_singleton", ts.ServiceAlias)
return err
}
volumes, err := db.GetManager().TenantServiceVolumeDao().GetTenantServiceVolumesByServiceID(ts.ServiceID)
if err != nil {
return err
}
for _, vo := range volumes {
if vo.VolumeType == dbmodel.ShareFileVolumeType.String() || vo.VolumeType == dbmodel.MemoryFSVolumeType.String() {
continue
}
if vo.VolumeType == dbmodel.LocalVolumeType.String() && !dbmodel.ServiceType(extendMethod).IsState() {
err := fmt.Errorf("service[%s] has local volume type, can't change type to stateless", ts.ServiceAlias)
return err
}
if vo.AccessMode == "RWO" && !dbmodel.ServiceType(extendMethod).IsState() {
err := fmt.Errorf("service[%s] volume[%s] access_mode is RWO, can't change type to stateless", ts.ServiceAlias, vo.VolumeName)
return err
}
}
ts.ExtendMethod = extendMethod
ts.ServiceType = extendMethod
}
//update service
if err := db.GetManager().TenantServiceDao().UpdateModel(ts); err != nil {
logrus.Errorf("update service error, %v", err)
return err
}
//update service version
if err := db.GetManager().VersionInfoDao().UpdateModel(version); err != nil {
logrus.Errorf("update version error, %v", err)
return err
if version != nil {
if err := db.GetManager().VersionInfoDao().UpdateModel(version); err != nil {
logrus.Errorf("update version error, %v", err)
return err
}
}
return nil
}
@ -1343,11 +1349,11 @@ func (s *ServiceAction) VolumnVar(tsv *dbmodel.TenantServiceVolume, tenantID, fi
tsv.HostPath = fmt.Sprintf("%s/tenant/%s/service/%s%s", sharePath, tenantID, tsv.ServiceID, tsv.VolumePath)
//local file storage
case dbmodel.LocalVolumeType.String():
serviceType, err := db.GetManager().TenantServiceLabelDao().GetTenantServiceTypeLabel(tsv.ServiceID)
serviceInfo, err := db.GetManager().TenantServiceDao().GetServiceTypeById(tsv.ServiceID)
if err != nil {
return util.CreateAPIHandleErrorFromDBError("service type", err)
}
if serviceType == nil || serviceType.LabelValue != core_util.StatefulServiceType {
if serviceInfo == nil || !serviceInfo.IsState() {
return util.CreateAPIHandleError(400, fmt.Errorf("local storage is only supported for stateful services"))
}
tsv.HostPath = fmt.Sprintf("%s/tenant/%s/service/%s%s", localPath, tenantID, tsv.ServiceID, tsv.VolumePath)
@ -1659,6 +1665,39 @@ func (s *ServiceAction) GetServicesStatus(tenantID string, serviceIDs []string)
return nil
}
// GetEnterpriseRunningServices returns the IDs of all running services under the given enterprise
func (s *ServiceAction) GetEnterpriseRunningServices(enterpriseID string) []string {
var tenantIDs []string
tenants, err := db.GetManager().EnterpriseDao().GetEnterpriseTenants(enterpriseID)
if err != nil {
logrus.Errorf("list tenant failed: %s", err.Error())
return []string{}
}
for _, tenant := range tenants {
tenantIDs = append(tenantIDs, tenant.UUID)
}
if len(tenantIDs) == 0 {
return []string{}
}
services, err := db.GetManager().TenantServiceDao().GetServicesByTenantIDs(tenantIDs)
if err != nil {
logrus.Errorf("list tenants servicee failed: %s", err.Error())
return []string{}
}
var serviceIDs []string
for _, svc := range services {
serviceIDs = append(serviceIDs, svc.ServiceID)
}
statusList := s.statusCli.GetStatuss(strings.Join(serviceIDs, ","))
retServices := make([]string, 0, 10)
for service, status := range statusList {
if status == typesv1.RUNNING {
retServices = append(retServices, service)
}
}
return retServices
}
//CreateTenant create tenant
func (s *ServiceAction) CreateTenant(t *dbmodel.Tenants) error {
if ten, _ := db.GetManager().TenantDao().GetTenantIDByName(t.Name); ten != nil {
@ -1762,7 +1801,7 @@ func (s *ServiceAction) GetPodContainerMemory(podNames []string) (map[string]map
memoryUsageMap := make(map[string]map[string]string, 10)
proxy := GetPrometheusProxy()
queryName := strings.Join(podNames, "|")
query := fmt.Sprintf(`container_memory_rss{pod_name=~"%s"}`, queryName)
query := fmt.Sprintf(`container_memory_rss{pod=~"%s"}`, queryName)
proQuery := strings.Replace(query, " ", "%20", -1)
req, err := http.NewRequest("GET", fmt.Sprintf("http://127.0.0.1:9999/api/v1/query?query=%s", proQuery), nil)
if err != nil {
@ -2178,13 +2217,3 @@ func CheckMapKey(rebody map[string]interface{}, key string, defaultValue interfa
rebody[key] = defaultValue
return rebody
}
func chekeServiceLabel(v string) string {
if strings.Contains(v, "有状态") {
return core_util.StatefulServiceType
}
if strings.Contains(v, "无状态") {
return core_util.StatelessServiceType
}
return v
}
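The extend_method branch added to ServiceUpdate above enforces two rules before a component's type may change: a component with more than one replica cannot become a singleton type, and a component holding a local or RWO volume cannot become stateless. A standalone sketch of the same checks, reusing the dbmodel names that appear in this diff (an illustration, not the committed implementation):

package main

import (
	"fmt"

	dbmodel "github.com/goodrain/rainbond/db/model"
)

// canChangeType mirrors the extend_method validation in ServiceUpdate.
func canChangeType(replicas int, volumes []dbmodel.TenantServiceVolume, target string) error {
	if replicas > 1 && dbmodel.ServiceType(target).IsSingleton() {
		return fmt.Errorf("replicas > 1, can't change to a singleton type")
	}
	for _, vo := range volumes {
		if vo.VolumeType == dbmodel.ShareFileVolumeType.String() || vo.VolumeType == dbmodel.MemoryFSVolumeType.String() {
			continue
		}
		if vo.VolumeType == dbmodel.LocalVolumeType.String() && !dbmodel.ServiceType(target).IsState() {
			return fmt.Errorf("local volume %s forbids a stateless type", vo.VolumeName)
		}
		if vo.AccessMode == "RWO" && !dbmodel.ServiceType(target).IsState() {
			return fmt.Errorf("volume %s is RWO, can't change to a stateless type", vo.VolumeName)
		}
	}
	return nil
}

func main() {
	vols := []dbmodel.TenantServiceVolume{
		{VolumeName: "data", VolumeType: dbmodel.LocalVolumeType.String(), AccessMode: "RWO"},
	}
	fmt.Println(canChangeType(1, vols, dbmodel.ServiceTypeStatelessMultiple.String())) // rejected: local volume
	fmt.Println(canChangeType(1, vols, dbmodel.ServiceTypeStateSingleton.String()))    // <nil>: a stateful target is allowed
}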

View File

@ -33,7 +33,6 @@ type ServiceHandler interface {
AddLabel(l *api_model.LabelsStruct, serviceID string) error
DeleteLabel(l *api_model.LabelsStruct, serviceID string) error
UpdateLabel(l *api_model.LabelsStruct, serviceID string) error
UpdateServiceLabel(serviceID, value string) error
StartStopService(s *api_model.StartStopStruct) error
ServiceVertical(v *model.VerticalScalingTaskBody) error
ServiceHorizontal(h *model.HorizontalScalingTaskBody) error
@ -59,6 +58,7 @@ type ServiceHandler interface {
RollBack(rs *api_model.RollbackStruct) error
GetStatus(serviceID string) (*api_model.StatusList, error)
GetServicesStatus(tenantID string, services []string) map[string]string
GetEnterpriseRunningServices(enterpriseID string) []string
CreateTenant(*dbmodel.Tenants) error
CreateTenandIDAndName(eid string) (string, string, error)
GetPods(serviceID string) (*K8sPodInfos, error)

View File

@ -258,6 +258,10 @@ type ServiceStruct struct {
// in: body
// required: true
ServiceAlias string `json:"service_alias" validate:"service_alias"`
// component type
// in: body
// required: true
ServiceType string `json:"service_type" validate:"service_type"`
// service description
// in: body
// required: false
@ -286,7 +290,7 @@ type ServiceStruct struct {
// in: body
// required: false
ContainerEnv string `json:"container_env" validate:"container_env"`
// scaling method; 0: stateless, 1: stateful, 2: partitioned
// scaling method; 0: stateless, 1: stateful, 2: partitioned (in v5.2 this field carries the component type)
// in: body
// required: false
ExtendMethod string `json:"extend_method" validate:"extend_method"`
@ -321,9 +325,13 @@ type ServiceStruct struct {
// service creation type: cloud (cloud market service), assistant (cloud assistant service)
// in: body
// required: false
ServiceOrigin string `json:"service_origin" validate:"service_origin"`
Kind string `json:"kind" validate:"kind|in:internal,third_party"`
EtcdKey string `json:"etcd_key" validate:"etcd_key"`
ServiceOrigin string `json:"service_origin" validate:"service_origin"`
Kind string `json:"kind" validate:"kind|in:internal,third_party"`
EtcdKey string `json:"etcd_key" validate:"etcd_key"`
//OSType runtime os type
// in: body
// required: false
OSType string `json:"os_type" validate:"os_type|in:windows,linux"`
ServiceLabel string `json:"service_label" validate:"service_label|in:StatelessServiceType,StatefulServiceType"`
NodeLabel string `json:"node_label" validate:"node_label"`
Operator string `json:"operator" validate:"operator"`
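A hedged sketch of a create-service request body using the fields added above; the field names follow the json tags in ServiceStruct and the values are placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// illustrative fragment of a create-service payload; only the fields added or
	// repurposed in this commit are shown
	payload := map[string]interface{}{
		"service_alias": "grdemo01",           // hypothetical alias
		"service_type":  "stateless_multiple", // new component type field
		"extend_method": "stateless_multiple", // in v5.2 this also carries the component type
		"os_type":       "linux",              // windows or linux
	}
	b, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(b))
}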

View File

@ -21,7 +21,6 @@ package build
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/goodrain/rainbond/builder"
@ -120,15 +119,6 @@ func GetBuild(lang code.Lang) (Build, error) {
}
//CreateImageName create image name
func CreateImageName(repoURL, serviceAlias, deployversion string) string {
reg := regexp.MustCompile(`.*(?:\:|\/)([\w\-\.]+)/([\w\-\.]+)\.git`)
rc := reg.FindSubmatch([]byte(repoURL))
var name string
if len(rc) == 3 {
name = fmt.Sprintf("%s_%s_%s", serviceAlias, string(rc[1]), string(rc[2]))
} else {
name = fmt.Sprintf("%s_%s", serviceAlias, "rainbondbuild")
}
buildImageName := strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, name, deployversion))
return buildImageName
func CreateImageName(serviceID, deployversion string) string {
return strings.ToLower(fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, serviceID, deployversion))
}
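CreateImageName no longer derives the name from the git repository URL and service alias; it now keys the image purely on the service ID. A sketch of the new naming, assuming builder.REGISTRYDOMAIN resolves to the goodrain.me registry host used as a default elsewhere in this commit:

package main

import (
	"fmt"
	"strings"
)

// registryDomain stands in for builder.REGISTRYDOMAIN in this sketch.
const registryDomain = "goodrain.me"

// createImageName mirrors the simplified CreateImageName above.
func createImageName(serviceID, deployVersion string) string {
	return strings.ToLower(fmt.Sprintf("%s/%s:%s", registryDomain, serviceID, deployVersion))
}

func main() {
	// hypothetical service ID and deploy version
	fmt.Println(createImageName("889bb1f028f655bebd545f24aa184a0b", "20200224120000"))
	// Output: goodrain.me/889bb1f028f655bebd545f24aa184a0b:20200224120000
}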

View File

@ -272,7 +272,6 @@ func (s *slugBuild) runBuildJob(re *Request) error {
corev1.EnvVar{Name: "SERVICE_ID", Value: re.ServiceID},
corev1.EnvVar{Name: "TENANT_ID", Value: re.TenantID},
corev1.EnvVar{Name: "LANGUAGE", Value: re.Lang.String()},
corev1.EnvVar{Name: "DEBUG", Value: "true"},
}
for k, v := range re.BuildEnvs {
envs = append(envs, corev1.EnvVar{Name: k, Value: v})
@ -318,7 +317,7 @@ func (s *slugBuild) runBuildJob(re *Request) error {
logrus.Debugf("slug subpath is : %s", slugSubPath)
appSubPath := strings.TrimPrefix(re.SourceDir, "/cache/")
logrus.Debugf("app subpath is : %s", appSubPath)
cacheSubPath := strings.TrimPrefix((re.CacheDir), "/cache/")
cacheSubPath := strings.TrimPrefix(re.CacheDir, "/cache/")
container.VolumeMounts = []corev1.VolumeMount{
corev1.VolumeMount{
Name: "app",
@ -470,7 +469,7 @@ func getJobPodLogs(ctx context.Context, podChan chan struct{}, clientset kuberne
func delete(clientset kubernetes.Interface, namespace, job string) {
logrus.Debugf("start delete job: %s", job)
listOptions := metav1.ListOptions{LabelSelector:fmt.Sprintf("job-name=%s", job)}
listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("job-name=%s", job)}
if err := clientset.CoreV1().Pods(namespace).DeleteCollection(&metav1.DeleteOptions{}, listOptions); err != nil {
logrus.Errorf("delete job pod failed: %s", err.Error())

View File

@ -46,7 +46,7 @@ func (d *dockerfileBuild) Build(re *Request) (*Response, error) {
re.Logger.Error(fmt.Sprintf("Parse dockerfile error"), map[string]string{"step": "builder-exector"})
return nil, err
}
buildImageName := CreateImageName(re.RepositoryURL, re.ServiceAlias, re.DeployVersion)
buildImageName := CreateImageName(re.ServiceID, re.DeployVersion)
buildOptions := types.ImageBuildOptions{
Tags: []string{buildImageName},

View File

@ -81,7 +81,7 @@ func (d *netcoreBuild) Build(re *Request) (*Response, error) {
return nil, fmt.Errorf("write default build dockerfile error:%s", err.Error())
}
d.sourceDir = re.SourceDir
d.imageName = CreateImageName(re.RepositoryURL, re.ServiceAlias, re.DeployVersion)
d.imageName = CreateImageName(re.ServiceID, re.DeployVersion)
d.buildImageName = d.imageName + "_build"
//build code
buildOptions := types.ImageBuildOptions{

View File

@ -25,11 +25,11 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/client"
"github.com/goodrain/rainbond/builder"
"github.com/goodrain/rainbond/builder/build"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/event"
"github.com/tidwall/gjson" //"github.com/docker/docker/api/types"
//"github.com/docker/docker/client"
"github.com/tidwall/gjson"
)
//ImageBuildItem ImageBuildItem
@ -79,7 +79,7 @@ func (i *ImageBuildItem) Run(timeout time.Duration) error {
i.Logger.Error(fmt.Sprintf("获取指定镜像: %s失败", i.Image), map[string]string{"step": "builder-exector", "status": "failure"})
return err
}
localImageURL := i.ImageNameHandler(i.Image)
localImageURL := build.CreateImageName(i.ServiceID, i.DeployVersion)
if err := sources.ImageTag(i.DockerClient, i.Image, localImageURL, i.Logger, 1); err != nil {
logrus.Errorf("change image tag error: %s", err.Error())
i.Logger.Error(fmt.Sprintf("修改镜像tag: %s -> %s 失败", i.Image, localImageURL), map[string]string{"step": "builder-exector", "status": "failure"})
@ -99,13 +99,6 @@ func (i *ImageBuildItem) Run(timeout time.Duration) error {
return nil
}
//ImageNameHandler build the local image name according to the platform configuration
func (i *ImageBuildItem) ImageNameHandler(source string) string {
imageModel := sources.ImageNameHandle(source)
localImageURL := fmt.Sprintf("%s/%s:%s", builder.REGISTRYDOMAIN, imageModel.Name, i.DeployVersion)
return localImageURL
}
//StorageVersionInfo store the version info
func (i *ImageBuildItem) StorageVersionInfo(imageURL string) error {
version, err := db.GetManager().VersionInfoDao().GetVersionByDeployVersion(i.DeployVersion, i.ServiceID)

View File

@ -426,13 +426,3 @@ func GetVolumeDir() (string, string) {
}
return localPath, sharePath
}
//GetServiceType get service deploy type
func GetServiceType(labels []*dbmodel.TenantServiceLable) string {
for _, l := range labels {
if l.LabelKey == dbmodel.LabelKeyServiceType {
return l.LabelValue
}
}
return util.StatelessServiceType
}

View File

@ -20,7 +20,9 @@ package exector
import (
"fmt"
dbmodel "github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/util"
"strings"
"testing"
)
@ -59,3 +61,16 @@ func TestUploadPkg2(t *testing.T) {
t.Error(err)
}
}
func TestBackupServiceVolume(t *testing.T) {
volume := dbmodel.TenantServiceVolume{}
sourceDir := ""
serviceID := ""
dstDir := fmt.Sprintf("%s/data_%s/%s.zip", sourceDir, serviceID, strings.Replace(volume.VolumeName, "/", "", -1))
hostPath := volume.HostPath
if hostPath != "" && !util.DirIsEmpty(hostPath) {
if err := util.Zip(hostPath, dstDir); err != nil {
t.Fatalf("backup service(%s) volume(%s) data error.%s", serviceID, volume.VolumeName, err.Error())
}
}
}

View File

@ -213,8 +213,9 @@ func (b *BackupAPPRestore) restoreVersionAndData(backup *dbmodel.AppBackup, appS
os.MkdirAll(volume.HostPath, 0777)
continue
}
//if app type is statefulset, change pod hostpath
if GetServiceType(app.ServiceLabel) == util.StatefulServiceType {
if app.Service.IsState() { // TODO fanyangyang: the component type also needs to be backed up
//Next two level directory
list, err := util.GetDirList(tmpDir, 2)
if err != nil {

View File

@ -20,6 +20,7 @@ package parser
import (
"fmt"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
@ -44,17 +45,17 @@ type DockerComposeParse struct {
//ServiceInfoFromDC service info from dockercompose
type ServiceInfoFromDC struct {
ports map[int]*types.Port
volumes map[string]*types.Volume
envs map[string]*types.Env
source string
memory int
image Image
args []string
depends []string
imageAlias string
deployType string
name string
ports map[int]*types.Port
volumes map[string]*types.Volume
envs map[string]*types.Env
source string
memory int
image Image
args []string
depends []string
imageAlias string
serviceType string
name string
}
//GetPorts get the port list
@ -163,7 +164,7 @@ func (d *DockerComposeParse) Parse() ParseErrorList {
if sc.DependsON != nil {
service.depends = sc.DependsON
}
service.deployType = DetermineDeployType(service.image)
service.serviceType = DetermineDeployType(service.image)
d.services[kev] = &service
}
for serviceName, service := range d.services {
@ -212,16 +213,17 @@ func (d *DockerComposeParse) GetServiceInfo() []ServiceInfo {
var sis []ServiceInfo
for _, service := range d.services {
si := ServiceInfo{
Ports: service.GetPorts(),
Envs: service.GetEnvs(),
Volumes: service.GetVolumes(),
Image: service.image,
Args: service.args,
DependServices: service.depends,
ImageAlias: service.imageAlias,
ServiceDeployType: service.deployType,
Name: service.name,
Cname: service.name,
Ports: service.GetPorts(),
Envs: service.GetEnvs(),
Volumes: service.GetVolumes(),
Image: service.image,
Args: service.args,
DependServices: service.depends,
ImageAlias: service.imageAlias,
ServiceType: service.serviceType,
Name: service.name,
Cname: service.name,
OS: runtime.GOOS,
}
if service.memory != 0 {
si.Memory = service.memory

View File

@ -27,6 +27,7 @@ import (
"github.com/goodrain/rainbond/db/model"
"github.com/goodrain/rainbond/event"
"github.com/goodrain/rainbond/util"
"runtime"
"strconv"
"strings" //"github.com/docker/docker/client"
)
@ -38,7 +39,7 @@ type DockerRunOrImageParse struct {
volumes map[string]*types.Volume
envs map[string]*types.Env
source string
deployType string
serviceType string
memory int
image Image
args []string
@ -133,7 +134,7 @@ func (d *DockerRunOrImageParse) Parse() ParseErrorList {
}
}
}
d.deployType = DetermineDeployType(d.image)
d.serviceType = DetermineDeployType(d.image)
return d.errors
}
@ -267,14 +268,15 @@ func (d *DockerRunOrImageParse) GetMemory() int {
//GetServiceInfo get the service info
func (d *DockerRunOrImageParse) GetServiceInfo() []ServiceInfo {
serviceInfo := ServiceInfo{
Ports: d.GetPorts(),
Envs: d.GetEnvs(),
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
ServiceDeployType: d.deployType,
Ports: d.GetPorts(),
Envs: d.GetEnvs(),
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
ServiceType: d.serviceType,
OS: runtime.GOOS,
}
if serviceInfo.Memory == 0 {
serviceInfo.Memory = 512

View File

@ -20,6 +20,7 @@ package parser
import (
"fmt"
dbmodel "github.com/goodrain/rainbond/db/model"
"strings"
"github.com/Sirupsen/logrus"
@ -28,7 +29,6 @@ import (
"github.com/goodrain/rainbond/builder/parser/discovery"
"github.com/goodrain/rainbond/builder/parser/types"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/util"
"k8s.io/apimachinery/pkg/api/resource"
)
@ -165,21 +165,22 @@ type Lang string
//ServiceInfo service information detected automatically
type ServiceInfo struct {
ID string `json:"id,omitempty"`
Ports []types.Port `json:"ports,omitempty"`
Envs []types.Env `json:"envs,omitempty"`
Volumes []types.Volume `json:"volumes,omitempty"`
Image Image `json:"image,omitempty"`
Args []string `json:"args,omitempty"`
DependServices []string `json:"depends,omitempty"`
ServiceDeployType string `json:"deploy_type,omitempty"`
Branchs []string `json:"branchs,omitempty"`
Memory int `json:"memory,omitempty"`
Lang code.Lang `json:"language,omitempty"`
ImageAlias string `json:"image_alias,omitempty"`
ID string `json:"id,omitempty"`
Ports []types.Port `json:"ports,omitempty"`
Envs []types.Env `json:"envs,omitempty"`
Volumes []types.Volume `json:"volumes,omitempty"`
Image Image `json:"image,omitempty"`
Args []string `json:"args,omitempty"`
DependServices []string `json:"depends,omitempty"`
ServiceType string `json:"service_type,omitempty"`
Branchs []string `json:"branchs,omitempty"`
Memory int `json:"memory,omitempty"`
Lang code.Lang `json:"language,omitempty"`
ImageAlias string `json:"image_alias,omitempty"`
//For third party services
Endpoints []*discovery.Endpoint `json:"endpoints,omitempty"`
//os type,default linux
OS string `json:"os"`
Name string `json:"name,omitempty"` // module name
Cname string `json:"cname,omitempty"` // service cname
Packaging string `json:"packaging,omitempty"`
@ -236,10 +237,10 @@ var dbImageKey = []string{
func DetermineDeployType(imageName Image) string {
for _, key := range dbImageKey {
if strings.ToLower(imageName.GetSimpleName()) == key {
return util.StatefulServiceType
return dbmodel.ServiceTypeStateSingleton.String()
}
}
return util.StatelessServiceType
return dbmodel.ServiceTypeStatelessMultiple.String()
}
//readmemory

View File

@ -21,6 +21,7 @@ package parser
import (
"fmt"
"path"
"runtime"
"strconv"
"strings"
@ -477,15 +478,16 @@ func (d *SourceCodeParse) GetLang() code.Lang {
//GetServiceInfo get the service info
func (d *SourceCodeParse) GetServiceInfo() []ServiceInfo {
serviceInfo := ServiceInfo{
Ports: d.GetPorts(),
Envs: d.GetEnvs(),
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
Lang: d.GetLang(),
ServiceDeployType: util.StatelessServiceType,
Ports: d.GetPorts(),
Envs: d.GetEnvs(),
Volumes: d.GetVolumes(),
Image: d.GetImage(),
Args: d.GetArgs(),
Branchs: d.GetBranchs(),
Memory: d.memory,
Lang: d.GetLang(),
ServiceType: model.ServiceTypeStatelessMultiple.String(),
OS: runtime.GOOS,
}
var res []ServiceInfo
if d.isMulti && d.services != nil && len(d.services) > 0 {

View File

@ -20,6 +20,7 @@ package option
import (
"fmt"
"runtime"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/mq/client"
@ -107,5 +108,8 @@ func (a *Builder) CheckConfig() error {
if a.Topic != client.BuilderTopic && a.Topic != client.WindowsBuilderTopic {
return fmt.Errorf("Topic is only suppory `%s` and `%s`", client.BuilderTopic, client.WindowsBuilderTopic)
}
if runtime.GOOS == "windows" {
a.Topic = "windows_builder"
}
return nil
}

View File

@ -99,7 +99,7 @@ type ListenPorts struct {
// AddFlags adds flags
func (g *GWServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&g.LogLevel, "log-level", "debug", "the gateway log level")
fs.StringVar(&g.K8SConfPath, "kube-conf", "/opt/rainbond/etc/kubernetes/kubecfg/admin.kubeconfig", "absolute path to the kubeconfig file")
fs.StringVar(&g.K8SConfPath, "kube-conf", "", "absolute path to the kubeconfig file")
fs.IntVar(&g.ListenPorts.Status, "status-port", 18080, `Port to use for the lua HTTP endpoint configuration.`)
fs.IntVar(&g.ListenPorts.Stream, "stream-port", 18081, `Port to use for the lua TCP/UDP endpoint configuration.`)
fs.IntVar(&g.ListenPorts.Health, "healthz-port", 10254, `Port to use for the healthz endpoint.`)
@ -118,6 +118,9 @@ func (g *GWServer) AddFlags(fs *pflag.FlagSet) {
// etcd
fs.StringSliceVar(&g.EtcdEndpoint, "etcd-endpoints", []string{"http://127.0.0.1:2379"}, "etcd cluster endpoints.")
fs.IntVar(&g.EtcdTimeout, "etcd-timeout", 10, "etcd http timeout seconds")
fs.StringVar(&g.EtcdCaFile, "etcd-ca", "", "etcd tls ca file ")
fs.StringVar(&g.EtcdCertFile, "etcd-cert", "", "etcd tls cert file")
fs.StringVar(&g.EtcdKeyFile, "etcd-key", "", "etcd http tls cert key file")
// health check
fs.StringVar(&g.HealthPath, "health-path", "/healthz", "the path of the health check endpoint")
fs.DurationVar(&g.HealthCheckTimeout, "health-check-timeout", 10, `Time limit, in seconds, for a probe to health-check-path to succeed.`)

View File

@ -35,7 +35,6 @@ var config Config
//Config Config
type Config struct {
RegionMysql RegionMysql `yaml:"region_db"`
Kubernets Kubernets `yaml:"kube"`
RegionAPI region.APIConf `yaml:"region_api"`
DockerLogPath string `yaml:"docker_log_path"`
@ -51,7 +50,7 @@ type RegionMysql struct {
//Kubernets Kubernets
type Kubernets struct {
Master string `yaml:"master"`
KubeConf string `yaml:"kube-conf"`
}
//LoadConfig load the configuration
@ -60,12 +59,6 @@ func LoadConfig(ctx *cli.Context) (Config, error) {
RegionAPI: region.APIConf{
Endpoints: []string{"http://127.0.0.1:8888"},
},
RegionMysql: RegionMysql{
User: os.Getenv("MYSQL_USER"),
Pass: os.Getenv("MYSQL_PASS"),
URL: os.Getenv("MYSQL_URL"),
Database: os.Getenv("MYSQL_DB"),
},
}
configfile := ctx.GlobalString("config")
if configfile == "" {

View File

@ -30,6 +30,7 @@ import (
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
endpointapi "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
envoyv2 "github.com/goodrain/rainbond/node/core/envoy/v2"
"github.com/goodrain/rainbond/util"
"google.golang.org/grpc"
)
@ -39,16 +40,18 @@ import (
//------- cds: discover all dependent services
//------- sds: every service has at least one Ready instance
type DependServiceHealthController struct {
listeners []v2.Listener
clusters []v2.Cluster
sdsHost []v2.ClusterLoadAssignment
interval time.Duration
envoyDiscoverVersion string //only support v2
checkFunc []func() bool
endpointClient v2.EndpointDiscoveryServiceClient
dependServiceCount int
clusterID string
dependServiceNames []string
listeners []v2.Listener
clusters []v2.Cluster
sdsHost []v2.ClusterLoadAssignment
interval time.Duration
envoyDiscoverVersion string //only support v2
checkFunc []func() bool
endpointClient v2.EndpointDiscoveryServiceClient
clusterClient v2.ClusterDiscoveryServiceClient
dependServiceCount int
clusterID string
dependServiceNames []string
ignoreCheckEndpointsClusterName []string
}
//NewDependServiceHealthController create a controller
@ -77,6 +80,7 @@ func NewDependServiceHealthController() (*DependServiceHealthController, error)
return nil, err
}
dsc.endpointClient = v2.NewEndpointDiscoveryServiceClient(cli)
dsc.clusterClient = v2.NewClusterDiscoveryServiceClient(cli)
nameIDs := strings.Split(os.Getenv("DEPEND_SERVICE"), ",")
for _, nameID := range nameIDs {
if len(strings.Split(nameID, ":")) > 0 {
@ -115,11 +119,36 @@ func (d *DependServiceHealthController) checkListener() bool {
}
func (d *DependServiceHealthController) checkClusters() bool {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
res, err := d.clusterClient.FetchClusters(ctx, &v2.DiscoveryRequest{
Node: &core.Node{
Cluster: d.clusterID,
Id: d.clusterID,
},
})
if err != nil {
logrus.Errorf("discover depend services cluster failure %s", err.Error())
return false
}
clusters := envoyv2.ParseClustersResource(res.Resources)
d.ignoreCheckEndpointsClusterName = nil
for _, cluster := range clusters {
if cluster.Type == v2.Cluster_LOGICAL_DNS {
d.ignoreCheckEndpointsClusterName = append(d.ignoreCheckEndpointsClusterName, cluster.Name)
}
}
d.clusters = clusters
return true
}
func (d *DependServiceHealthController) checkEDS() bool {
logrus.Infof("start checking eds; dependent service cluster names: %s", d.dependServiceNames)
if len(d.clusters) == len(d.ignoreCheckEndpointsClusterName) {
logrus.Info("all dependent services is domain third service.")
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
res, err := d.endpointClient.FetchEndpoints(ctx, &v2.DiscoveryRequest{
@ -147,7 +176,11 @@ func (d *DependServiceHealthController) checkEDS() bool {
if ready, exist := readyClusters[serviceName]; exist && ready {
continue
}
ready := func() bool {
if util.StringArrayContains(d.ignoreCheckEndpointsClusterName, cla.ClusterName) {
return true
}
if len(cla.Endpoints) > 0 && len(cla.Endpoints[0].LbEndpoints) > 0 {
// the first LbEndpoint's health status is not nil, so the endpoint is not a not-ready address
if host, ok := cla.Endpoints[0].LbEndpoints[0].HostIdentifier.(*endpointapi.LbEndpoint_Endpoint); ok {

View File

@ -20,6 +20,7 @@ package healthy
import (
"context"
"fmt"
"testing"
yaml "gopkg.in/yaml.v2"
@ -33,9 +34,9 @@ import (
v2 "github.com/envoyproxy/go-control-plane/envoy/api/v2"
)
var testClusterID = "2bf54c5a0b5a48a890e2dda8635cb507_2591d9904fc4480c9c012037697f98c6_grc9e8e3"
var testClusterID = "8cd9214e6b3d4476942b600f41bfefea_tcpmeshd3d6a722b632b854b6c232e4895e0cc6_gr5e0cc6"
var testXDSHost = "192.168.195.1:6101"
var testXDSHost = "39.104.66.227:6101"
// var testClusterID = "2bf54c5a0b5a48a890e2dda8635cb507_tcpmeshed6827c0afdda50599b4108105c9e8e3_grc9e8e3"
//var testXDSHost = "127.0.0.1:6101"
@ -87,7 +88,12 @@ func TestClientCluster(t *testing.T) {
}
t.Logf("version %s", res.GetVersionInfo())
clusters := envoyv2.ParseClustersResource(res.Resources)
printYaml(t, clusters)
for _, cluster := range clusters {
if cluster.Type == v2.Cluster_LOGICAL_DNS {
fmt.Println(cluster.Name)
}
printYaml(t, cluster)
}
}
func printYaml(t *testing.T, data interface{}) {
@ -117,6 +123,9 @@ func TestClientEndpoint(t *testing.T) {
}
t.Logf("version %s", res.GetVersionInfo())
endpoints := envoyv2.ParseLocalityLbEndpointsResource(res.Resources)
for _, e := range endpoints {
fmt.Println(e.GetClusterName())
}
printYaml(t, endpoints)
}

View File

@ -136,6 +136,9 @@ func NewConfig() *Config {
//AddFlag monitor flag
func (c *Config) AddFlag(cmd *pflag.FlagSet) {
cmd.StringVar(&c.EtcdEndpointsLine, "etcd-endpoints", c.EtcdEndpointsLine, "etcd endpoints list.")
cmd.StringVar(&c.EtcdCaFile, "etcd-ca", "", "etcd tls ca file ")
cmd.StringVar(&c.EtcdCertFile, "etcd-cert", "", "etcd tls cert file")
cmd.StringVar(&c.EtcdKeyFile, "etcd-key", "", "etcd http tls cert key file")
cmd.StringVar(&c.AdvertiseAddr, "advertise-addr", c.AdvertiseAddr, "advertise address, and registry into etcd.")
cmd.IntVar(&c.CadvisorListenPort, "cadvisor-listen-port", c.CadvisorListenPort, "kubelet cadvisor listen port in all node")
cmd.StringSliceVar(&c.AlertManagerURL, "alertmanager-address", c.AlertManagerURL, "AlertManager url.")

View File

@ -54,6 +54,9 @@ func (a *MQServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&a.LogLevel, "log-level", "info", "the mq log level")
fs.StringSliceVar(&a.EtcdEndPoints, "etcd-endpoints", []string{"http://127.0.0.1:2379"}, "etcd v3 cluster endpoints.")
fs.IntVar(&a.EtcdTimeout, "etcd-timeout", 10, "etcd http timeout seconds")
fs.StringVar(&a.EtcdCaFile, "etcd-ca", "", "etcd tls ca file ")
fs.StringVar(&a.EtcdCertFile, "etcd-cert", "", "etcd tls cert file")
fs.StringVar(&a.EtcdKeyFile, "etcd-key", "", "etcd http tls cert key file")
fs.StringVar(&a.EtcdPrefix, "etcd-prefix", "/mq", "the etcd data save key prefix ")
fs.IntVar(&a.APIPort, "api-port", 6300, "the api server listen port")
fs.StringVar(&a.RunMode, "mode", "grpc", "the api server run mode grpc or http")

View File

@ -121,8 +121,10 @@ type Conf struct {
// ImageGCPeriod is the period for performing image garbage collection.
ImageGCPeriod time.Duration
ImageRepositoryIPAddress string
ImageRepositoryHost string
// Namespace for Rainbond application.
RbdNamespace string
ImageRepositoryHost string
HostsFile string
}
//StatsdConfig StatsdConfig
@ -182,8 +184,9 @@ func (a *Conf) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&a.ImageGCPeriod, "image-gc-period", 5*time.Minute, "ImageGCPeriod is the period for performing image garbage collection. Examples: '10s', '5m' or '2h45m'.")
fs.Int32Var(&a.ImageGCHighThresholdPercent, "image-gc-high-threshold", 90, "The percent of disk usage after which image garbage collection is always run. Values must be within the range [0, 100], To disable image garbage collection, set to 100. ")
fs.Int32Var(&a.ImageGCLowThresholdPercent, "image-gc-low-threshold", 75, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Values must be within the range [0, 100] and should not be larger than that of --image-gc-high-threshold.")
fs.StringVar(&a.ImageRepositoryIPAddress, "image-repo-ip", "", "The ip address of image repository")
fs.StringVar(&a.RbdNamespace, "rbd-ns", "rbd-system", "The namespace of rainbond applications.")
fs.StringVar(&a.ImageRepositoryHost, "image-repo-host", "goodrain.me", "The host of image repository")
fs.StringVar(&a.HostsFile, "hostsfile", "/newetc/hosts", "/etc/hosts mapped path in the container. eg. /etc/hosts:/tmp/hosts. Do not set hostsfile to /etc/hosts")
}
//SetLog configure logging

View File

@ -21,6 +21,10 @@ package server
import (
"context"
"fmt"
"github.com/goodrain/rainbond/discover.v2"
"github.com/goodrain/rainbond/node/initiate"
"github.com/goodrain/rainbond/util/constants"
"k8s.io/client-go/kubernetes"
"os"
"os/signal"
"syscall"
@ -30,19 +34,19 @@ import (
"github.com/goodrain/rainbond/node/api"
"github.com/goodrain/rainbond/node/api/controller"
"github.com/goodrain/rainbond/node/core/store"
"github.com/goodrain/rainbond/node/initiate"
"github.com/goodrain/rainbond/node/kubecache"
"github.com/goodrain/rainbond/node/masterserver"
"github.com/goodrain/rainbond/node/nodem"
"github.com/goodrain/rainbond/node/nodem/docker"
"github.com/goodrain/rainbond/node/nodem/envoy"
etcdutil "github.com/goodrain/rainbond/util/etcd"
k8sutil "github.com/goodrain/rainbond/util/k8s"
"github.com/Sirupsen/logrus"
)
//Run start run
func Run(c *option.Conf) error {
func Run(cfg *option.Conf) error {
var stoped = make(chan struct{})
stopfunc := func() error {
close(stoped)
@ -53,23 +57,29 @@ func Run(c *option.Conf) error {
defer cancel()
etcdClientArgs := &etcdutil.ClientArgs{
Endpoints: c.EtcdEndpoints,
CaFile: c.EtcdCaFile,
CertFile: c.EtcdCertFile,
KeyFile: c.EtcdKeyFile,
DialTimeout: c.EtcdDialTimeout,
Endpoints: cfg.EtcdEndpoints,
CaFile: cfg.EtcdCaFile,
CertFile: cfg.EtcdCertFile,
KeyFile: cfg.EtcdKeyFile,
DialTimeout: cfg.EtcdDialTimeout,
}
if err := c.ParseClient(ctx, etcdClientArgs); err != nil {
if err := cfg.ParseClient(ctx, etcdClientArgs); err != nil {
return fmt.Errorf("config parse error:%s", err.Error())
}
hostManager := initiate.NewHostManager(c.ImageRepositoryIPAddress, c.ImageRepositoryHost)
if err := hostManager.CleanupAndFlush(); err != nil {
logrus.Errorf("error writing image repository resolve: %v", err)
config, err := k8sutil.NewRestConfig(cfg.K8SConfPath)
if err != nil {
return err
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return err
}
nodemanager, err := nodem.NewNodeManager(c)
k8sDiscover := discover.NewK8sDiscover(ctx, clientset, cfg)
defer k8sDiscover.Stop()
nodemanager, err := nodem.NewNodeManager(cfg)
if err != nil {
return fmt.Errorf("create node manager failed: %s", err)
}
@ -78,7 +88,7 @@ func Run(c *option.Conf) error {
}
err = eventLog.NewManager(eventLog.EventConfig{
EventLogServers: c.EventLogServer,
EventLogServers: cfg.EventLogServer,
DiscoverArgs: etcdClientArgs,
})
if err != nil {
@ -87,23 +97,30 @@ func Run(c *option.Conf) error {
}
defer eventLog.CloseManager()
logrus.Debug("create and start event log client success")
kubecli, err := kubecache.NewKubeClient(c)
kubecli, err := kubecache.NewKubeClient(cfg, clientset)
if err != nil {
return err
}
defer kubecli.Stop()
logrus.Debug("create and start kube cache moudle success")
if cfg.ImageRepositoryHost == constants.DefImageRepository {
hostManager, err := initiate.NewHostManager(cfg, k8sDiscover)
if err != nil {
return fmt.Errorf("create new host manager: %v", err)
}
hostManager.Start()
}
logrus.Debugf("rbd-namespace=%s; rbd-docker-secret=%s", os.Getenv("RBD_NAMESPACE"), os.Getenv("RBD_DOCKER_SECRET"))
// sync docker insecure registry cert info to all rainbond nodes
if err = docker.SyncDockerCertFromSecret(kubecli.GetKubeClient(), os.Getenv("RBD_NAMESPACE"), os.Getenv("RBD_DOCKER_SECRET")); err != nil { // TODO fanyangyang namespace secretname
if err = docker.SyncDockerCertFromSecret(clientset, os.Getenv("RBD_NAMESPACE"), os.Getenv("RBD_DOCKER_SECRET")); err != nil { // TODO fanyangyang namespace secretname
return fmt.Errorf("sync docker cert from secret error: %s", err.Error())
}
// init etcd client
if err = store.NewClient(ctx, c, etcdClientArgs); err != nil {
return fmt.Errorf("Connect to ETCD %s failed: %s", c.EtcdEndpoints, err)
if err = store.NewClient(ctx, cfg, etcdClientArgs); err != nil {
return fmt.Errorf("Connect to ETCD %s failed: %s", cfg.EtcdEndpoints, err)
}
errChan := make(chan error, 3)
if err := nodemanager.Start(errChan); err != nil {
@ -114,7 +131,7 @@ func Run(c *option.Conf) error {
//the master service starts after the node service
var ms *masterserver.MasterServer
if c.RunMode == "master" {
if cfg.RunMode == "master" {
ms, err = masterserver.NewMasterServer(nodemanager.GetCurrentNode(), kubecli)
if err != nil {
logrus.Errorf(err.Error())
@ -129,7 +146,7 @@ func Run(c *option.Conf) error {
logrus.Debug("create and start master server moudle success")
}
//create api manager
apiManager := api.NewManager(*c, nodemanager.GetCurrentNode(), ms, kubecli)
apiManager := api.NewManager(*cfg, nodemanager.GetCurrentNode(), ms, kubecli)
if err := apiManager.Start(errChan); err != nil {
return err
}
@ -139,7 +156,7 @@ func Run(c *option.Conf) error {
defer apiManager.Stop()
//create service mesh controller
grpcserver, err := envoy.CreateDiscoverServerManager(kubecli, *c)
grpcserver, err := envoy.CreateDiscoverServerManager(clientset, *cfg)
if err != nil {
return err
}
@ -165,7 +182,7 @@ func Run(c *option.Conf) error {
logrus.Info("See you next time!")
return nil
}
err := initService(c, startfunc, stopfunc)
err := initService(cfg, startfunc, stopfunc)
if err != nil {
return err
}

View File

@ -37,6 +37,7 @@ type Config struct {
Port int
SessionKey string
PrometheusMetricPath string
K8SConfPath string
}
//WebCliServer container webcli server
@ -54,9 +55,13 @@ func NewWebCliServer() *WebCliServer {
func (a *WebCliServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&a.LogLevel, "log-level", "info", "the webcli log level")
fs.StringSliceVar(&a.EtcdEndPoints, "etcd-endpoints", []string{"http://127.0.0.1:2379"}, "etcd v3 cluster endpoints.")
fs.StringVar(&a.EtcdCaFile, "etcd-ca", "", "etcd tls ca file ")
fs.StringVar(&a.EtcdCertFile, "etcd-cert", "", "etcd tls cert file")
fs.StringVar(&a.EtcdKeyFile, "etcd-key", "", "etcd http tls cert key file")
fs.StringVar(&a.Address, "address", "0.0.0.0", "server listen address")
fs.StringVar(&a.HostIP, "hostIP", "", "Current node Intranet IP")
fs.StringVar(&a.HostName, "hostName", "", "Current node host name")
fs.StringVar(&a.K8SConfPath, "kube-conf", "", "absolute path to the kubeconfig file")
fs.IntVar(&a.Port, "port", 7171, "server listen port")
fs.StringVar(&a.PrometheusMetricPath, "metric", "/metrics", "prometheus metrics path")
}

View File

@ -39,7 +39,8 @@ func Run(s *option.WebCliServer) error {
option.Address = s.Address
option.Port = strconv.Itoa(s.Port)
option.SessionKey = s.SessionKey
ap, err := app.New(nil, &option)
option.K8SConfPath = s.K8SConfPath
ap, err := app.New(&option)
if err != nil {
return err
}

View File

@ -46,6 +46,7 @@ func main() {
}
ctx, cancel := context.WithCancel(context.Background())
shell := strings.Split(conf.RunShell, " ")
logrus.Infof("run shell: %s", shell)
cmd := exec.CommandContext(ctx, shell[0], shell[1:]...)
startFunc := func() error {
cmd.Stdin = os.Stdin
@ -67,9 +68,10 @@ func main() {
cancel()
}
}()
var s os.Signal = syscall.SIGTERM
defer func() {
if cmd.Process != nil {
if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
if err := cmd.Process.Signal(s); err != nil {
logrus.Errorf("send SIGTERM signal to progress failure %s", err.Error())
}
time.Sleep(time.Second * 2)
@ -79,7 +81,8 @@ func main() {
term := make(chan os.Signal)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case <-term:
case ls := <-term:
s = ls
logrus.Warn("Received SIGTERM, exiting gracefully...")
case <-ctx.Done():
}

View File

@ -25,9 +25,6 @@ import (
"github.com/Sirupsen/logrus"
"github.com/eapache/channels"
"k8s.io/client-go/kubernetes"
kubeaggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"github.com/goodrain/rainbond/cmd/worker/option"
"github.com/goodrain/rainbond/db"
"github.com/goodrain/rainbond/db/config"
@ -42,6 +39,7 @@ import (
"github.com/goodrain/rainbond/worker/master"
"github.com/goodrain/rainbond/worker/monitor"
"github.com/goodrain/rainbond/worker/server"
"k8s.io/client-go/kubernetes"
)
//Run start run
@ -85,12 +83,6 @@ func Run(s *option.Worker) error {
}
s.Config.KubeClient = clientset
kubeaggregatorclientset, err := kubeaggregatorclientset.NewForConfig(restConfig)
if err != nil {
logrus.Error("kube aggregator; read kube config file error.", err)
return err
}
//step 3: create resource store
startCh := channels.NewRingChannel(1024)
updateCh := channels.NewRingChannel(1024)
@ -111,7 +103,7 @@ func Run(s *option.Worker) error {
defer controllerManager.Stop()
//step 5 : start runtime master
masterCon, err := master.NewMasterController(s.Config, cachestore, kubeaggregatorclientset)
masterCon, err := master.NewMasterController(s.Config, cachestore)
if err != nil {
return err
}

View File

@ -41,6 +41,11 @@ type DelDao interface {
DeleteModel(serviceID string, arg ...interface{}) error
}
// EnterpriseDao enterprise dao
type EnterpriseDao interface {
GetEnterpriseTenants(enterpriseID string) ([]*model.Tenants, error)
}
//TenantDao tenant dao
type TenantDao interface {
Dao
@ -99,6 +104,7 @@ type TenantServiceDao interface {
UpdateDeployVersion(serviceID, deployversion string) error
ListThirdPartyServices() ([]*model.TenantServices, error)
ListServicesByTenantID(tenantID string) ([]*model.TenantServices, error)
GetServiceTypeById(serviceID string) (*model.TenantServices, error)
}
//TenantServiceDeleteDao TenantServiceDeleteDao

View File

@ -36,6 +36,7 @@ type Manager interface {
VolumeTypeDao() dao.VolumeTypeDao
LicenseDao() dao.LicenseDao
AppDao() dao.AppDao
EnterpriseDao() dao.EnterpriseDao
TenantDao() dao.TenantDao
TenantDaoTransactions(db *gorm.DB) dao.TenantDao
TenantServiceDao() dao.TenantServiceDao

View File

@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: db/db.go
// Source: db.go
// Package db is a generated GoMock package.
package db
@ -106,6 +106,18 @@ func (mr *MockManagerMockRecorder) AppDao() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppDao", reflect.TypeOf((*MockManager)(nil).AppDao))
}
// EnterpriseDao mocks base method
func (m *MockManager) EnterpriseDao() dao.EnterpriseDao {
ret := m.ctrl.Call(m, "EnterpriseDao")
ret0, _ := ret[0].(dao.EnterpriseDao)
return ret0
}
// EnterpriseDao indicates an expected call of EnterpriseDao
func (mr *MockManagerMockRecorder) EnterpriseDao() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnterpriseDao", reflect.TypeOf((*MockManager)(nil).EnterpriseDao))
}
// TenantDao mocks base method
func (m *MockManager) TenantDao() dao.TenantDao {
ret := m.ctrl.Call(m, "TenantDao")

View File

@ -155,8 +155,8 @@ func TestSetServiceLabel(t *testing.T) {
t.Fatal(err)
}
label := model.TenantServiceLable{
LabelKey: model.LabelKeyServiceType,
LabelValue: util.StatefulServiceType,
LabelKey: "labelkey",
LabelValue: "labelvalue",
ServiceID: "889bb1f028f655bebd545f24aa184a0b",
}
label.CreatedAt = time.Now()

View File

@ -28,7 +28,7 @@ type AppBackup struct {
SourceDir string `gorm:"column:source_dir;size:255" json:"source_dir"`
SourceType string `gorm:"column:source_type;size:255;default:'local'" json:"source_type"`
BackupMode string `gorm:"column:backup_mode;size:32" json:"backup_mode"`
BuckupSize int64 `gorm:"column:backup_size type:bigint" json:"backup_size"`
BuckupSize int64 `gorm:"column:backup_size;type:bigint" json:"backup_size"`
Deleted bool `gorm:"column:deleted" json:"deleted"`
}

View File

@ -85,6 +85,68 @@ func (s ServiceKind) String() string {
return string(s)
}
// ServiceType type of service
type ServiceType string
// String implements fmt.Stringer
func (s ServiceType) String() string {
return string(s)
}
// IsState is state type or not
func (s ServiceType) IsState() bool {
if s == "" {
return false
}
if s == ServiceTypeStatelessSingleton || s == ServiceTypeStatelessMultiple {
return false
}
return true
}
// IsSingleton is singleton or not
func (s ServiceType) IsSingleton() bool {
if s == "" {
return false
}
if s == ServiceTypeStatelessMultiple || s == ServiceTypeStateMultiple {
return false
}
return true
}
// TODO fanyangyang: roughly determine whether the component is stateful based on its type
// IsState is state service or stateless service
func (ts TenantServices) IsState() bool {
if ts.ServiceType == "" {
return false
}
return ServiceType(ts.ServiceType).IsState()
}
// IsSingleton is singleton or multiple service
func (ts TenantServices) IsSingleton() bool {
if ts.ServiceType == "" {
return false
}
return ServiceType(ts.ServiceType).IsSingleton()
}
// ServiceTypeUnknown unknown
var ServiceTypeUnknown ServiceType = "unknown"
//ServiceTypeStatelessSingleton stateless_singleton
var ServiceTypeStatelessSingleton ServiceType = "stateless_singleton"
// ServiceTypeStatelessMultiple stateless_multiple
var ServiceTypeStatelessMultiple ServiceType = "stateless_multiple"
// ServiceTypeStateSingleton state_singleton
var ServiceTypeStateSingleton ServiceType = "state_singleton"
// ServiceTypeStateMultiple state_multiple
var ServiceTypeStateMultiple ServiceType = "state_multiple"
//TenantServices app service base info
type TenantServices struct {
Model
@ -96,6 +158,8 @@ type TenantServices struct {
ServiceAlias string `gorm:"column:service_alias;size:30" json:"service_alias"`
// service registered endpoint name (host name), used by statefulset
ServiceName string `gorm:"column:service_name;size:100" json:"service_name"`
// Service type now service support stateless_singleton/stateless_multiple/state_singleton/state_multiple
ServiceType string `gorm:"column:service_type;size:32" json:"service_type"`
// service description
Comment string `gorm:"column:comment" json:"comment"`
// container CPU weight
@ -105,7 +169,7 @@ type TenantServices struct {
//UpgradeMethod service upgrade controller type
//such as : `Rolling` `OnDelete`
UpgradeMethod string `gorm:"column:upgrade_method;default:'Rolling'" json:"upgrade_method"`
// scaling method; 0: stateless, 1: stateful, 2: partitioned
// scaling method; 0: stateless, 1: stateful, 2: partitioned (deprecated since V5.2)
ExtendMethod string `gorm:"column:extend_method;default:'stateless';" json:"extend_method"`
// replica count
Replicas int `gorm:"column:replicas;default:1" json:"replicas"`
@ -197,6 +261,8 @@ type TenantServicesDelete struct {
ServiceAlias string `gorm:"column:service_alias;size:30" json:"service_alias"`
// service registered endpoint name (host name), used by statefulset
ServiceName string `gorm:"column:service_name;size:100" json:"service_name"`
// Service type now service support stateless_singleton/stateless_multiple/state_singleton/state_multiple
ServiceType string `gorm:"column:service_type;size:20" json:"service_type"`
// service description
Comment string `gorm:"column:comment" json:"comment"`
// container CPU weight
@ -423,6 +489,7 @@ var LabelKeyNodeSelector = "node-selector"
//LabelKeyNodeAffinity node affinity label
var LabelKeyNodeAffinity = "node-affinity"
// TODO fanyangyang: to be removed; the component type is now recorded in the tenant_service table
//LabelKeyServiceType service deploy type label
var LabelKeyServiceType = "service-type"
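For reference, the four concrete service types defined earlier in this file map onto the two predicates as follows; a small sketch that prints the matrix (the expected output follows from the IsState/IsSingleton definitions above):

package main

import (
	"fmt"

	dbmodel "github.com/goodrain/rainbond/db/model"
)

func main() {
	for _, t := range []dbmodel.ServiceType{
		dbmodel.ServiceTypeStatelessSingleton,
		dbmodel.ServiceTypeStatelessMultiple,
		dbmodel.ServiceTypeStateSingleton,
		dbmodel.ServiceTypeStateMultiple,
	} {
		fmt.Printf("%-20s IsState=%-5v IsSingleton=%v\n", t, t.IsState(), t.IsSingleton())
	}
	// stateless_singleton  IsState=false IsSingleton=true
	// stateless_multiple   IsState=false IsSingleton=false
	// state_singleton      IsState=true  IsSingleton=true
	// state_multiple       IsState=true  IsSingleton=false
}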

View File

@ -0,0 +1,22 @@
package dao
import (
"github.com/goodrain/rainbond/db/model"
"github.com/jinzhu/gorm"
)
//EnterpriseDaoImpl enterprise tenant information management
type EnterpriseDaoImpl struct {
DB *gorm.DB
}
//GetEnterpriseTenants returns all tenants that belong to the given enterprise
func (e *EnterpriseDaoImpl) GetEnterpriseTenants(enterpriseID string) ([]*model.Tenants, error) {
var tenants []*model.Tenants
if enterpriseID == "" {
return []*model.Tenants{}, nil
}
if err := e.DB.Where("eid= ?", enterpriseID).Find(&tenants).Error; err != nil {
return nil, err
}
return tenants, nil
}
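A usage sketch of the new DAO as it is reached through the manager interface extended in this commit; it assumes the database manager has already been initialized at process startup, and the enterprise ID is a placeholder:

package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/goodrain/rainbond/db"
)

// listEnterpriseTenants shows how callers reach the new DAO.
func listEnterpriseTenants(enterpriseID string) {
	tenants, err := db.GetManager().EnterpriseDao().GetEnterpriseTenants(enterpriseID)
	if err != nil {
		logrus.Errorf("list enterprise tenants: %v", err)
		return
	}
	for _, t := range tenants {
		fmt.Println(t.UUID, t.Name)
	}
}

func main() {
	listEnterpriseTenants("example-enterprise-id") // hypothetical enterprise ID
}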

View File

@ -172,6 +172,27 @@ type TenantServicesDaoImpl struct {
DB *gorm.DB
}
// GetServiceTypeById get service type by service id
func (t *TenantServicesDaoImpl) GetServiceTypeById(serviceID string) (*model.TenantServices, error) {
var service model.TenantServices
if err := t.DB.Select("tenant_id, service_id, service_alias, service_type").Where("service_id=?", serviceID).Find(&service).Error; err != nil {
return nil, err
}
if service.ServiceType == "" {
// compatibility with services created before V5.2
logrus.Infof("get service type for pre-V5.2 service[%s]", serviceID)
rows, err := t.DB.Raw("select label_value from tenant_services_label where service_id=? and label_key=?", serviceID, "service-type").Rows()
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
rows.Scan(&service.ServiceType)
}
}
return &service, nil
}
//GetAllServicesID get all service sample info
func (t *TenantServicesDaoImpl) GetAllServicesID() ([]*model.TenantServices, error) {
var services []*model.TenantServices
@ -1322,22 +1343,12 @@ type ServiceLabelDaoImpl struct {
func (t *ServiceLabelDaoImpl) AddModel(mo model.Interface) error {
label := mo.(*model.TenantServiceLable)
var oldLabel model.TenantServiceLable
if label.LabelKey == model.LabelKeyServiceType { //LabelKeyServiceType 只能有一条
if ok := t.DB.Where("service_id = ? and label_key=?", label.ServiceID, label.LabelKey).Find(&oldLabel).RecordNotFound(); ok {
if err := t.DB.Create(label).Error; err != nil {
return err
}
} else {
return fmt.Errorf("label key %s of service %s is exist", label.LabelKey, label.ServiceID)
if ok := t.DB.Where("service_id = ? and label_key=? and label_value=?", label.ServiceID, label.LabelKey, label.LabelValue).Find(&oldLabel).RecordNotFound(); ok {
if err := t.DB.Create(label).Error; err != nil {
return err
}
} else {
if ok := t.DB.Where("service_id = ? and label_key=? and label_value=?", label.ServiceID, label.LabelKey, label.LabelValue).Find(&oldLabel).RecordNotFound(); ok {
if err := t.DB.Create(label).Error; err != nil {
return err
}
} else {
return fmt.Errorf("label key %s value %s of service %s is exist", label.LabelKey, label.LabelValue, label.ServiceID)
}
return fmt.Errorf("label key %s value %s of service %s is exist", label.LabelKey, label.LabelValue, label.ServiceID)
}
return nil
}
@ -1439,15 +1450,10 @@ func (t *ServiceLabelDaoImpl) GetTenantServiceAffinityLabel(serviceID string) ([
return labels, nil
}
// Deprecated: no usages; use TenantServiceDao.GetServiceTypeById(serviceID string) to get the tenant service type
//GetTenantServiceTypeLabel GetTenantServiceTypeLabel
func (t *ServiceLabelDaoImpl) GetTenantServiceTypeLabel(serviceID string) (*model.TenantServiceLable, error) {
var label model.TenantServiceLable
if err := t.DB.Where("service_id=? and label_key=?", serviceID, model.LabelKeyServiceType).Find(&label).Error; err != nil {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, err
}
return &label, nil
}
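Callers elsewhere in this commit (the backup snapshot and VolumnVar) now resolve the component type through the new DAO method instead of the deprecated service-type label; a usage sketch, again assuming an initialized database manager and a placeholder service ID:

package main

import (
	"fmt"

	"github.com/goodrain/rainbond/db"
)

// resolveServiceType is a usage sketch; for services created before V5.2
// GetServiceTypeById falls back to the legacy tenant_services_label row,
// so callers only ever see the resolved type.
func resolveServiceType(serviceID string) {
	svc, err := db.GetManager().TenantServiceDao().GetServiceTypeById(serviceID)
	if err != nil {
		fmt.Println("get service type:", err)
		return
	}
	fmt.Printf("service %s type=%s stateful=%v\n", svc.ServiceAlias, svc.ServiceType, svc.IsState())
}

func main() {
	resolveServiceType("example-service-id") // hypothetical service ID
}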

View File

@ -39,6 +39,13 @@ func (m *Manager) LicenseDao() dao.LicenseDao {
}
}
// EnterpriseDao enterprise dao
func (m *Manager) EnterpriseDao() dao.EnterpriseDao {
return &mysqldao.EnterpriseDaoImpl{
DB: m.db,
}
}
//TenantDao tenant data
func (m *Manager) TenantDao() dao.TenantDao {
return &mysqldao.TenantDaoImpl{

View File

@ -52,7 +52,9 @@ type Callback interface {
//Discover automatic discovery of backend services
type Discover interface {
// Add project to cache if not exists, then watch the endpoints.
AddProject(name string, callback Callback)
// Update a project.
AddUpdateProject(name string, callback CallbackUpdate)
Stop()
}

discover.v2/k8s_discover.go
View File

@ -0,0 +1,170 @@
package discover
import (
"context"
"github.com/Sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/goodrain/rainbond/cmd/node/option"
"github.com/goodrain/rainbond/discover/config"
)
type k8sDiscover struct {
ctx context.Context
cancel context.CancelFunc
lock sync.Mutex
clientset kubernetes.Interface
cfg *option.Conf
projects map[string]CallbackUpdate
}
func NewK8sDiscover(ctx context.Context, clientset kubernetes.Interface, cfg *option.Conf) Discover {
ctx, cancel := context.WithCancel(ctx)
return &k8sDiscover{
ctx: ctx,
cancel: cancel,
clientset: clientset,
cfg: cfg,
projects: make(map[string]CallbackUpdate),
}
}
func (k *k8sDiscover) Stop() {
k.cancel()
}
func (k *k8sDiscover) AddProject(name string, callback Callback) {
k.lock.Lock()
defer k.lock.Unlock()
if _, ok := k.projects[name]; !ok {
cal := &defaultCallBackUpdate{
callback: callback,
endpoints: make(map[string]*config.Endpoint),
}
k.projects[name] = cal
go k.discover(name, cal)
}
}
func (k *k8sDiscover) AddUpdateProject(name string, callback CallbackUpdate) {
k.lock.Lock()
defer k.lock.Unlock()
if _, ok := k.projects[name]; !ok {
k.projects[name] = callback
go k.discover(name, callback)
}
}
func (k *k8sDiscover) discover(name string, callback CallbackUpdate) {
ctx, cancel := context.WithCancel(k.ctx)
defer cancel()
endpoints := k.list(name)
if len(endpoints) > 0 {
callback.UpdateEndpoints(config.SYNC, endpoints...)
}
w, err := k.clientset.CoreV1().Pods(k.cfg.RbdNamespace).Watch(metav1.ListOptions{
LabelSelector: "name=" + name,
})
if err != nil {
k.rewatchWithErr(name, callback, err)
return
}
for {
select {
case <-ctx.Done():
return
		case event := <-w.ResultChan():
			if event.Object == nil {
				continue
			}
			if event.Type == watch.Error {
				// the object of an error event is not a *corev1.Pod; re-watch instead of asserting
				k.rewatchWithErr(name, callback, errors.New("watch error event received"))
				return
			}
			pod, ok := event.Object.(*corev1.Pod)
			if !ok {
				continue
			}
			ep := endpointForPod(pod)
			switch event.Type {
			case watch.Deleted:
				callback.UpdateEndpoints(config.DELETE, ep)
			case watch.Added, watch.Modified:
				if !isPodReady(pod) {
					continue
				}
				callback.UpdateEndpoints(config.SYNC, ep)
			}
}
}
}
func (k *k8sDiscover) removeProject(name string) {
k.lock.Lock()
defer k.lock.Unlock()
if _, ok := k.projects[name]; ok {
delete(k.projects, name)
}
}
func (k *k8sDiscover) rewatchWithErr(name string, callback CallbackUpdate, err error) {
logrus.Debugf("name: %s; monitor discover get watch error: %s, remove this watch target first, and then sleep 10 sec, we will re-watch it", name, err.Error())
callback.Error(err)
k.removeProject(name)
time.Sleep(10 * time.Second)
k.AddUpdateProject(name, callback)
}
func (k *k8sDiscover) list(name string) []*config.Endpoint {
podList, err := k.clientset.CoreV1().Pods(k.cfg.RbdNamespace).List(metav1.ListOptions{
LabelSelector: "name=" + name,
})
if err != nil {
logrus.Warningf("list pods for %s: %v", name, err)
return nil
}
var endpoints []*config.Endpoint
var notReadyEp *config.Endpoint
for _, pod := range podList.Items {
ep := endpointForPod(&pod)
if isPodReady(&pod) {
endpoints = append(endpoints, ep)
continue
}
if notReadyEp == nil {
notReadyEp = endpointForPod(&pod)
}
}
// If there are no ready endpoints, a not ready endpoint is used
if len(endpoints) == 0 && notReadyEp != nil {
endpoints = append(endpoints, notReadyEp)
}
return endpoints
}
func endpointForPod(pod *corev1.Pod) *config.Endpoint {
return &config.Endpoint{
Name: pod.Name,
URL: pod.Status.PodIP,
}
}
func isPodReady(pod *corev1.Pod) bool {
if pod.ObjectMeta.DeletionTimestamp != nil {
return false
}
for _, cond := range pod.Status.Conditions {
if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
return true
}
}
return false
}
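A minimal consumer sketch for the discover above, assuming the discover.v2 import path used elsewhere in this commit and the Callback shape exercised by the test file that follows; the logCallback type and watchGateway helper are illustrative only:

```go
package example

import (
	"context"

	"github.com/Sirupsen/logrus"
	"k8s.io/client-go/kubernetes"

	"github.com/goodrain/rainbond/cmd/node/option"
	"github.com/goodrain/rainbond/discover/config"
	discover "github.com/goodrain/rainbond/discover.v2"
)

// logCallback only logs endpoint changes; a real consumer would reload its
// upstream configuration here.
type logCallback struct{}

func (l *logCallback) UpdateEndpoints(endpoints ...*config.Endpoint) {
	for _, ep := range endpoints {
		logrus.Infof("endpoint %s -> %s", ep.Name, ep.URL)
	}
}

func (l *logCallback) Error(err error) {
	logrus.Warningf("discover error: %v", err)
}

// watchGateway watches rbd-gateway pods in the namespace configured in cfg.
func watchGateway(clientset kubernetes.Interface, cfg *option.Conf) discover.Discover {
	d := discover.NewK8sDiscover(context.Background(), clientset, cfg)
	d.AddProject("rbd-gateway", &logCallback{})
	return d
}
```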

View File

@ -0,0 +1,146 @@
package discover
import (
"context"
"fmt"
"github.com/goodrain/rainbond/cmd/node/option"
"github.com/goodrain/rainbond/discover/config"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sutil "github.com/goodrain/rainbond/util/k8s"
)
func TestK8sDiscover_AddProject(t *testing.T) {
tests := []struct {
name string
}{
{
name: "ok",
},
}
for idx := range tests {
tc := tests[idx]
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
cfg := &option.Conf{RbdNamespace: ""}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "rbd-gateway-abcde",
Labels: map[string]string{
"name": "rbd-gateway",
},
},
Status: corev1.PodStatus{
PodIP: "172.20.0.20",
},
}
clientset := fake.NewSimpleClientset(pod)
discover := NewK8sDiscover(ctx, clientset, cfg)
defer discover.Stop()
callback := &testCallback{
epCh: make(chan []*config.Endpoint),
errCh: make(chan error),
}
discover.AddProject("rbd-gateway", callback)
go func() {
for {
select {
case endpoints := <-callback.epCh:
for _, ep := range endpoints {
fmt.Printf("%#v\n", ep)
}
case err := <-callback.errCh:
t.Errorf("received unexpected error from callback: %v", err)
return
default:
}
}
}()
time.Sleep(2 * time.Second)
pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
Type: corev1.PodReady,
Status: corev1.ConditionTrue,
})
pod.Status.PodIP = "172.20.0.50"
_, err := clientset.CoreV1().Pods("").Update(pod)
if err != nil {
t.Error(err)
}
time.Sleep(1 * time.Second)
err = clientset.CoreV1().Pods("").Delete(pod.Name, &metav1.DeleteOptions{})
if err != nil {
t.Error(err)
}
time.Sleep(30 * time.Second)
})
}
}
type testCallback struct {
epCh chan []*config.Endpoint
errCh chan error
}
func (t *testCallback) UpdateEndpoints(endpoints ...*config.Endpoint) {
t.epCh <- endpoints
}
func (t *testCallback) Error(err error) {
fmt.Println(err)
}
func TestK8sDiscover_AddProject2(t *testing.T) {
c, err := k8sutil.NewRestConfig("/Users/abewang/.kube/config")
if err != nil {
t.Error(err)
t.FailNow()
}
clientset, err := kubernetes.NewForConfig(c)
if err != nil {
t.Error(err)
t.FailNow()
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfg := &option.Conf{RbdNamespace: "rbd-system"}
discover := NewK8sDiscover(ctx, clientset, cfg)
defer discover.Stop()
callback := &testCallback{
epCh: make(chan []*config.Endpoint),
errCh: make(chan error),
}
discover.AddProject("rbd-gateway", callback)
for {
select {
case endpoints := <-callback.epCh:
for _, ep := range endpoints {
fmt.Printf("%#v\n", ep)
}
case err := <-callback.errCh:
t.Errorf("received unexpected error from callback: %v", err)
return
default:
}
}
}

View File

@ -46,6 +46,11 @@ func (w *WebsocketMessage) Encode() []byte {
return reb
}
type sendMessage struct {
messageType int
data []byte
}
//PubContext websocket context
type PubContext struct {
ID string
@ -57,6 +62,7 @@ type PubContext struct {
chans map[string]*Chan
lock sync.Mutex
close chan struct{}
sendQueue chan sendMessage
}
//Chan handle
@ -82,6 +88,7 @@ func NewPubContext(upgrader websocket.Upgrader,
httpRequest: httpRequest,
server: s,
chans: make(map[string]*Chan, 2),
sendQueue: make(chan sendMessage, 1024),
close: make(chan struct{}),
}
}
@ -221,7 +228,7 @@ func (p *PubContext) readMessage(closed chan struct{}) {
continue
}
if messageType == websocket.PingMessage {
p.conn.WriteMessage(websocket.PongMessage, []byte{})
p.SendWebsocketMessage(websocket.PongMessage)
continue
}
if messageType == websocket.BinaryMessage {
@ -230,16 +237,34 @@ func (p *PubContext) readMessage(closed chan struct{}) {
}
}
func (p *PubContext) send() {
for {
select {
case m, ok := <-p.sendQueue:
if !ok {
return
}
p.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
if err := p.conn.WriteMessage(m.messageType, m.data); err != nil {
p.server.log.Debugf("write websocket message failure %s", err.Error())
}
case <-p.close:
p.server.log.Debugf("pub context send chan closed")
return
}
}
}
//SendMessage send websocket message
func (p *PubContext) SendMessage(message WebsocketMessage) error {
p.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
return p.conn.WriteMessage(websocket.TextMessage, message.Encode())
p.sendQueue <- sendMessage{messageType: websocket.TextMessage, data: message.Encode()}
return nil
}
//SendWebsocketMessage send websocket message
func (p *PubContext) SendWebsocketMessage(message int) error {
p.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
return p.conn.WriteMessage(message, []byte{})
p.sendQueue <- sendMessage{messageType: message, data: []byte{}}
return nil
}
func (p *PubContext) sendPing(closed chan struct{}) {
@ -263,6 +288,7 @@ func (p *PubContext) Start() {
p.server.log.Error("Create web socket conn error.", err.Error())
return
}
go p.send()
pingclosed := make(chan struct{})
readclosed := make(chan struct{})
go p.sendPing(pingclosed)
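The send queue above serializes all outbound frames onto one goroutine, which matters because websocket connections (the Upgrader and message constants used above suggest gorilla/websocket) support at most one concurrent writer. A stripped-down sketch of the same single-writer pattern, independent of PubContext; all names here are illustrative:

```go
package example

import (
	"time"

	"github.com/gorilla/websocket"
)

type outFrame struct {
	messageType int
	data        []byte
}

// writer owns the connection's write side: every frame funnels through queue,
// so WriteMessage is never called from two goroutines at once.
type writer struct {
	conn  *websocket.Conn
	queue chan outFrame
	close chan struct{}
}

func (w *writer) run() {
	for {
		select {
		case f, ok := <-w.queue:
			if !ok {
				return
			}
			w.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
			if err := w.conn.WriteMessage(f.messageType, f.data); err != nil {
				return
			}
		case <-w.close:
			return
		}
	}
}

// Send enqueues a text frame; the run goroutine performs the actual write.
func (w *writer) Send(data []byte) {
	w.queue <- outFrame{messageType: websocket.TextMessage, data: data}
}
```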

View File

@ -253,7 +253,8 @@ func (a proxy) Parse(ing *extensions.Ingress) (interface{}, error) {
config.ProxyBuffering = defBackend.ProxyBuffering
}
config.SetHeaders = make(map[string]string)
config.SetHeaders = defBackend.ProxySetHeaders
//default header
for k, v := range defBackend.ProxySetHeaders {
config.SetHeaders[k] = v
}

View File

@ -102,7 +102,19 @@ func NewDefault() Configuration {
LimitRate: 0,
LimitRateAfter: 0,
ProxyBuffering: "off",
ProxySetHeaders: map[string]string{},
//defaut set header
ProxySetHeaders: map[string]string{
"Host": "$best_http_host",
"X-Real-IP": "$remote_addr",
"X-Forwarded-For": "$remote_addr",
"X-Forwarded-Host": "$best_http_host",
"X-Forwarded-Port": "$pass_port",
"X-Forwarded-Proto": "$pass_access_scheme",
"X-Scheme": "$pass_access_scheme",
// mitigate HTTPoxy Vulnerability
// https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
"Proxy": "\"\"",
},
},
}
return cfg

View File

@ -20,6 +20,8 @@ package store
import (
"bytes"
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"net"
@ -816,7 +818,6 @@ func (s *k8sStore) syncSecret(secrKey string) {
logrus.Errorf("fail to get certificate pem: %v", err)
return
}
old, exists := s.sslStore.Get(secrKey)
if exists {
oldSSLCert := old.(*v1.SSLCert)
@ -857,9 +858,22 @@ func (s *k8sStore) getCertificatePem(secrKey string) (*v1.SSLCert, error) {
if e := ioutil.WriteFile(filename, buffer.Bytes(), 0666); e != nil {
return nil, fmt.Errorf("cant not write data to %s: %v", filename, e)
}
fileContent, err := ioutil.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("read certificate file failed: %s", err.Error())
}
	pemContent, _ := pem.Decode(fileContent)
	if pemContent == nil {
		return nil, fmt.Errorf("no pem block found in certificate file %s", filename)
	}
	certificate, err := x509.ParseCertificate(pemContent.Bytes)
	if err != nil {
		return nil, fmt.Errorf("generate certificate object failed: %s", err.Error())
	}
return &v1.SSLCert{
CertificatePem: filename,
Certificate: certificate,
CertificateStr: string(certificate.Raw),
PrivateKey: string(key),
CN: []string{certificate.Subject.CommonName},
}, nil
}
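The SSLCert assembled above now carries the parsed x509 data. A hypothetical follow-up check a caller could run before publishing the certificate (not part of this change; helper name and signature are illustrative):

```go
package example

import (
	"crypto/x509"
	"fmt"
	"time"
)

// checkCertificate rejects expired certificates and, when host is given,
// certificates that do not cover it.
func checkCertificate(cert *x509.Certificate, host string) error {
	if time.Now().After(cert.NotAfter) {
		return fmt.Errorf("certificate %q expired at %s", cert.Subject.CommonName, cert.NotAfter)
	}
	if host != "" {
		if err := cert.VerifyHostname(host); err != nil {
			return fmt.Errorf("certificate does not cover %s: %v", host, err)
		}
	}
	return nil
}
```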

View File

@ -19,24 +19,34 @@
package clients
import (
"fmt"
"os"
"path"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond-operator/pkg/generated/clientset/versioned"
"github.com/goodrain/rainbond/builder/sources"
k8sutil "github.com/goodrain/rainbond/util/k8s"
"github.com/Sirupsen/logrus"
"k8s.io/client-go/kubernetes"
)
//K8SClient K8SClient
var K8SClient kubernetes.Interface
//RainbondKubeClient rainbond custom resource client
var RainbondKubeClient versioned.Interface
//InitClient init k8s client
func InitClient(kubeconfig string) error {
if kubeconfig == "" {
homePath, _ := sources.Home()
kubeconfig = path.Join(homePath, ".kube/config")
}
_, err := os.Stat(kubeconfig)
if err != nil {
fmt.Printf("Please make sure the kube-config file(%s) exists\n", kubeconfig)
os.Exit(1)
}
// use the current context in kubeconfig
config, err := k8sutil.NewRestConfig(kubeconfig)
if err != nil {
@ -50,5 +60,6 @@ func InitClient(kubeconfig string) error {
logrus.Error("Create kubernetes client error.", err.Error())
return err
}
RainbondKubeClient = versioned.NewForConfigOrDie(config)
return nil
}

View File

@ -29,8 +29,9 @@ import (
//NewCmdAnsible ansible config cmd
func NewCmdAnsible() cli.Command {
c := cli.Command{
Name: "ansible",
Usage: "Manage the ansible environment",
Name: "ansible",
Usage: "Manage the ansible environment",
Hidden: true,
Subcommands: []cli.Command{
cli.Command{
Name: "hosts",

View File

@ -21,14 +21,17 @@ package cmd
import (
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"github.com/apcera/termtables"
"github.com/ghodss/yaml"
"github.com/goodrain/rainbond/grctl/clients"
"github.com/goodrain/rainbond/node/nodem/client"
"github.com/gosuri/uitable"
"github.com/urfave/cli"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//NewCmdCluster cmd for cluster
@ -36,6 +39,16 @@ func NewCmdCluster() cli.Command {
c := cli.Command{
Name: "cluster",
Usage: "show curren cluster datacenter info",
Subcommands: []cli.Command{
cli.Command{
Name: "config",
Usage: "prints the current cluster configuration",
Action: func(c *cli.Context) error {
Common(c)
return printConfig(c)
},
},
},
Action: func(c *cli.Context) error {
Common(c)
return getClusterInfo(c)
@ -51,12 +64,10 @@ func getClusterInfo(c *cli.Context) error {
if err.Code == 502 {
fmt.Println("The current cluster node manager is not working properly.")
fmt.Println("You can query the service log for troubleshooting.")
fmt.Println("Exec Command: journalctl -fu node")
os.Exit(1)
}
fmt.Println("The current cluster api server is not working properly.")
fmt.Println("You can query the service log for troubleshooting.")
fmt.Println("Exec Command: journalctl -fu rbd-api")
os.Exit(1)
}
healthCPUFree := fmt.Sprintf("%.2f", float32(clusterInfo.HealthCapCPU)-clusterInfo.HealthReqCPU)
@ -88,22 +99,8 @@ func getClusterInfo(c *cli.Context) error {
}())+"%")
fmt.Println(table)
//show services health status
allNodeHealth, err := clients.RegionClient.Nodes().GetAllNodeHealth()
handleErr(err)
serviceTable2 := termtables.CreateTable()
serviceTable2.AddHeaders("Service", "HealthyQuantity/Total", "Message")
serviceStatusInfo := allNodeHealth
status, message := clusterStatus(serviceStatusInfo["Role"], serviceStatusInfo["Ready"])
serviceTable2.AddRow("\033[0;33;33mClusterStatus\033[0m", status, message)
for name, v := range serviceStatusInfo {
if name == "Role" {
continue
}
status, message := summaryResult(v)
serviceTable2.AddRow(name, status, message)
}
fmt.Println(serviceTable2.Render())
//show component health status
printComponentStatus()
//show node detail
serviceTable := termtables.CreateTable()
serviceTable.AddHeaders("Uid", "IP", "HostName", "NodeRole", "Status")
@ -232,3 +229,23 @@ func clusterStatus(roleList []map[string]string, ReadyList []map[string]string)
}
return clusterStatus, errMessage
}
func printComponentStatus() {
fmt.Println("----------------------------------------------------------------------------------")
fmt.Println()
cmd := exec.Command("kubectl", "get", "pod", "-n", "rbd-system", "-o", "wide")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Run()
fmt.Println()
}
func printConfig(c *cli.Context) error {
config, err := clients.RainbondKubeClient.RainbondV1alpha1().RainbondClusters("rbd-system").Get("rainbondcluster", metav1.GetOptions{})
if err != nil {
showError(err.Error())
}
out, _ := yaml.Marshal(config)
fmt.Println(string(out))
return nil
}

View File

@ -51,7 +51,11 @@ func Common(c *cli.Context) {
if err != nil {
logrus.Warn("Load config file error.", err.Error())
}
if err := clients.InitClient(c.GlobalString("kubeconfig")); err != nil {
kc := c.GlobalString("kubeconfig")
if kc != "" {
config.Kubernets.KubeConf = kc
}
if err := clients.InitClient(config.Kubernets.KubeConf); err != nil {
logrus.Errorf("error config k8s,details %s", err.Error())
}
//clients.SetInfo(config.RegionAPI.URL, config.RegionAPI.Token)
@ -61,6 +65,21 @@ func Common(c *cli.Context) {
}
//CommonWithoutRegion init clients without the region API client
func CommonWithoutRegion(c *cli.Context) {
config, err := conf.LoadConfig(c)
if err != nil {
logrus.Warn("Load config file error.", err.Error())
}
kc := c.GlobalString("kubeconfig")
if kc != "" {
config.Kubernets.KubeConf = kc
}
if err := clients.InitClient(config.Kubernets.KubeConf); err != nil {
logrus.Errorf("error config k8s,details %s", err.Error())
}
}
// fatal prints the message (if provided) and then exits. If V(2) or greater,
// glog.Fatal is invoked for extended information.
func fatal(msg string, code int) {

View File

@ -1,31 +1,115 @@
package cmd
import (
"fmt"
"io/ioutil"
"os"
"os/signal"
"syscall"
"path"
"github.com/Sirupsen/logrus"
"github.com/goodrain/rainbond/api/region"
"github.com/goodrain/rainbond/builder/sources"
"github.com/goodrain/rainbond/cmd/grctl/option"
"github.com/goodrain/rainbond/grctl/clients"
"github.com/urfave/cli"
yaml "gopkg.in/yaml.v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var pemDirPath = "/opt/rainbond/etc/ssl/region/"
var clientPemPath = path.Join(pemDirPath, "client.pem")
var clientKeyPemPath = path.Join(pemDirPath, "client.key.pem")
var clientCAPemPath = path.Join(pemDirPath, "ca.pem")
//NewCmdInstall -
func NewCmdInstall() cli.Command {
c := cli.Command{
Name: "install",
Hidden: true,
Usage: "grctl install",
Flags: []cli.Flag{
cli.StringSliceFlag{
Name: "gateway-ips",
Usage: "all gateway ip of this cluster, use it to access the region api",
EnvVar: "GatewayIP",
},
},
Usage: "grctl install",
Action: func(c *cli.Context) error {
//step finally: listen Signal
term := make(chan os.Signal)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
select {
case s := <-term:
logrus.Infof("Received a Signal %s, exiting gracefully...", s.String())
CommonWithoutRegion(c)
			apiClientSecret, err := clients.K8SClient.CoreV1().Secrets("rbd-system").Get("rbd-api-client-cert", metav1.GetOptions{})
if err != nil {
showError(fmt.Sprintf("get region api tls secret failure %s", err.Error()))
}
logrus.Info("See you next time!")
regionAPIIP := c.StringSlice("gateway-ip")
if len(regionAPIIP) == 0 {
cluster, err := clients.RainbondKubeClient.RainbondV1alpha1().RainbondClusters("rbd-system").Get("rainbondcluster", metav1.GetOptions{})
if err != nil {
showError(fmt.Sprintf("get rainbond cluster config failure %s", err.Error()))
}
gatewayIP := cluster.GatewayIngressIPs()
if len(gatewayIP) == 0 {
showError("gateway ip not found")
}
regionAPIIP = gatewayIP
}
			if err := writeCertFile(apiClientSecret); err != nil {
showError(fmt.Sprintf("write region api cert file failure %s", err.Error()))
}
if err := writeConfig(regionAPIIP); err != nil {
showError(fmt.Sprintf("write grctl config file failure %s", err.Error()))
}
fmt.Println("Install success!")
return nil
},
}
return c
}
func writeCertFile(apiClientSecret *v1.Secret) error {
	if _, err := os.Stat(pemDirPath); err != nil {
		os.MkdirAll(pemDirPath, 0755)
	}
	if err := ioutil.WriteFile(clientPemPath, apiClientSecret.Data["client.pem"], 0411); err != nil && !os.IsExist(err) {
		return err
	}
	if err := ioutil.WriteFile(clientKeyPemPath, apiClientSecret.Data["client.key.pem"], 0411); err != nil && !os.IsExist(err) {
		return err
	}
	if err := ioutil.WriteFile(clientCAPemPath, apiClientSecret.Data["ca.pem"], 0411); err != nil && !os.IsExist(err) {
		return err
	}
	return nil
}
func writeConfig(ips []string) error {
var endpoints []string
for _, ip := range ips {
endpoints = append(endpoints, fmt.Sprintf("https://%s:8443", ip))
}
var config = option.Config{
RegionAPI: region.APIConf{
Endpoints: endpoints,
Cacert: clientCAPemPath,
Cert: clientPemPath,
CertKey: clientKeyPemPath,
},
}
home, _ := sources.Home()
configFilePath := path.Join(home, ".rbd", "grctl.yaml")
	os.MkdirAll(path.Dir(configFilePath), 0755)
os.Remove(configFilePath)
configFile, err := os.OpenFile(configFilePath, os.O_CREATE|os.O_RDWR, 0411)
if err != nil {
return err
}
defer configFile.Close()
body, err := yaml.Marshal(&config)
if err != nil {
return err
}
_, err = configFile.Write(body)
if err != nil {
return err
}
return nil
}

View File

@ -51,17 +51,7 @@ server {
set $pass_access_scheme $scheme;
set $best_http_host $http_host;
set $pass_port $server_port;
# default proxy_set_header
proxy_set_header Host $best_http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Host $best_http_host;
proxy_set_header X-Forwarded-Port $pass_port;
proxy_set_header X-Forwarded-Proto $pass_access_scheme;
proxy_set_header X-Scheme $pass_access_scheme;
# mitigate HTTPoxy Vulnerability
# https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
proxy_set_header Proxy "";
# custom proxy_set_header
{{ range $k, $v := $loc.Proxy.SetHeaders }}
proxy_set_header {{$k}} {{$v}};

View File

@ -1,6 +1,6 @@
FROM goodrainapps/alpine:3.4
COPY . /run
RUN chmod +x /run/rainbond-grctl /run/entrypoint.sh
VOLUME [ "/rootfs" ]
VOLUME [ "/rootfs/root","/rootfs/path","/ssl" ]
ENV RELEASE_DESC=__RELEASE_DESC__
ENTRYPOINT ["/run/entrypoint.sh"]

View File

@ -0,0 +1,10 @@
## Install
### Prerequisites
`~/.kube/config` must exist.
### Install commands
```bash
docker run -it --rm -v /:/rootfs goodrain.me/rbd-grctl:V5.2-dev copy
grctl install
```
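### Verify
An optional quick check that the generated configuration works (output depends on your cluster):
```bash
grctl cluster
grctl cluster config
```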

View File

@ -3,11 +3,8 @@ if [ "$1" = "bash" ];then
exec /bin/bash
elif [ "$1" = "version" ];then
echo "$RELEASE_DESC"
elif [ "$1" = "install" ];then
cp -a /run/rainbond-grctl /rootfs/path
mkdir -p /rootfs/root/.rbd
cp -a /run/grctl.yaml /rootfs/root/.rbd/grctl.yaml
exec /run/rainbond-grctl install
elif [ "$1" = "copy" ];then
cp -a /run/rainbond-grctl /rootfs/usr/local/bin/
else
exec /run/rainbond-grctl "$@"
fi

View File

@ -22,11 +22,18 @@ package initiate
import (
"bufio"
"context"
"errors"
"fmt"
"net"
"os"
"strings"
"github.com/Sirupsen/logrus"
discover "github.com/goodrain/rainbond/discover.v2"
"github.com/goodrain/rainbond/discover/config"
"github.com/goodrain/rainbond/cmd/node/option"
)
const (
@ -36,46 +43,78 @@ const (
eol = "\n"
)
var (
// ErrRegistryAddressNotFound record not found error, happens when haven't find any matched registry address.
ErrRegistryAddressNotFound = errors.New("registry address not found")
)
// HostManager is responsible for writing the resolution of the private image repository domain name to /etc/hosts.
type HostManager struct {
// private image repository domain name
Domain string
// private image repository IP address
IP string
type HostManager interface {
Start()
}
// NewHostManager creates a new HostManager.
func NewHostManager(ip, domain string) *HostManager {
return &HostManager{
Domain: domain,
IP: ip,
func NewHostManager(cfg *option.Conf, discover discover.Discover) (HostManager, error) {
hosts, err := NewHosts(cfg.HostsFile)
if err != nil {
return nil, err
}
callback := &hostCallback{
cfg: cfg,
hosts: hosts,
}
return &hostManager{
cfg: cfg,
discover: discover,
hostCallback: callback,
}, nil
}
type hostManager struct {
ctx context.Context
cfg *option.Conf
discover discover.Discover
hostCallback *hostCallback
}
func (h *hostManager) Start() {
if h.cfg.ImageRepositoryHost == "" {
// no need to write hosts file
return
}
h.discover.AddProject("rbd-gateway", h.hostCallback)
}
type hostCallback struct {
cfg *option.Conf
hosts Hosts
}
func (h *hostCallback) UpdateEndpoints(endpoints ...*config.Endpoint) {
logrus.Info("hostCallback; update endpoints")
if err := h.hosts.Cleanup(); err != nil {
logrus.Warningf("cleanup hosts file: %v", err)
return
}
if len(endpoints) > 0 {
logrus.Infof("found endpints: %d; endpoint selected: %#v", len(endpoints), *endpoints[0])
lines := []string{
startOfSection,
endpoints[0].URL + " " + h.cfg.ImageRepositoryHost,
endpoints[0].URL + " " + "region.goodrain.me",
endOfSection,
}
h.hosts.AddLines(lines...)
}
if err := h.hosts.Flush(); err != nil {
logrus.Warningf("flush hosts file: %v", err)
}
}
// CleanupAndFlush cleanup old content and write new ones.
func (h *HostManager) CleanupAndFlush() error {
if h.IP == "" {
return nil
}
hosts, err := NewHosts()
if err != nil {
return fmt.Errorf("error creating hosts: %v", err)
}
if err := hosts.Cleanup(); err != nil {
return fmt.Errorf("error cleanup hosts: %v", err)
}
lines := []string{
"\n",
startOfSection,
h.IP + " " + h.Domain,
endOfSection,
}
hosts.AddLines(lines...)
return hosts.Flush()
func (h *hostCallback) Error(err error) {
logrus.Warningf("unexpected error from host callback: %v", err)
}
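A condensed sketch of the Hosts round-trip that hostCallback performs, written as if it lived in package initiate so it can reuse the package's existing NewHosts helper and startOfSection/endOfSection markers; the wrapper name and single-IP signature are illustrative:

```go
// rewriteGatewayEntry rewrites the managed hosts-file section with one gateway IP.
// hostsFile is typically /etc/hosts; imageHost is e.g. goodrain.me.
func rewriteGatewayEntry(hostsFile, gatewayIP, imageHost string) error {
	hosts, err := NewHosts(hostsFile)
	if err != nil {
		return err
	}
	// drop any previously managed section before writing the new one
	if err := hosts.Cleanup(); err != nil {
		return err
	}
	hosts.AddLines(
		startOfSection,
		gatewayIP+" "+imageHost,
		endOfSection,
	)
	return hosts.Flush()
}
```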
// HostsLine represents a single line in the hosts file.
@ -97,7 +136,7 @@ func NewHostsLine(raw string) HostsLine {
if !output.IsComment() {
rawIP := fields[0]
if net.ParseIP(rawIP) == nil {
output.Err = errors.New(fmt.Sprintf("Bad hosts line: %q", raw))
output.Err = fmt.Errorf("Bad hosts line: %q", raw)
}
output.IP = rawIP
@ -121,8 +160,8 @@ type Hosts struct {
}
// NewHosts return a new instance of ``Hosts``.
func NewHosts() (Hosts, error) {
hosts := Hosts{Path: "/etc/hosts"}
func NewHosts(hostsFile string) (Hosts, error) {
hosts := Hosts{Path: hostsFile}
err := hosts.load()
if err != nil {

View File

@ -1,8 +1,15 @@
package initiate
import (
"context"
"fmt"
"github.com/goodrain/rainbond/cmd/node/option"
"github.com/goodrain/rainbond/discover.v2"
"k8s.io/client-go/kubernetes"
"reflect"
"testing"
k8sutil "github.com/goodrain/rainbond/util/k8s"
)
func TestHosts_Cleanup(t *testing.T) {
@ -81,3 +88,37 @@ func TestHosts_Add(t *testing.T) {
})
}
}
func TestHostManager_Start(t *testing.T) {
config, err := k8sutil.NewRestConfig("/Users/abewang/.kube/config")
if err != nil {
t.Error(err)
t.FailNow()
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
t.Error(err)
t.FailNow()
}
ctx := context.Background()
cfg := &option.Conf{
RbdNamespace: "rbd-system",
ImageRepositoryHost: "goodrain.me",
}
k8sDiscover := discover.NewK8sDiscover(ctx, clientset, cfg)
defer k8sDiscover.Stop()
hostManager, err := NewHostManager(cfg, k8sDiscover)
if err != nil {
t.Error(err)
t.FailNow()
}
hostManager.Start()
fmt.Println("oook")
select {
}
}

View File

@ -20,6 +20,8 @@ package kubecache
import (
"fmt"
"github.com/eapache/channels"
"k8s.io/apimachinery/pkg/labels"
"math"
"strings"
"time"
@ -30,34 +32,59 @@ import (
"github.com/goodrain/rainbond/node/nodem/client"
"github.com/Sirupsen/logrus"
k8sutil "github.com/goodrain/rainbond/util/k8s"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
)
// EventType -
type EventType string
const (
//EvictionKind EvictionKind
EvictionKind = "Eviction"
//EvictionSubresource EvictionSubresource
EvictionSubresource = "pods/eviction"
// CreateEvent event associated with new objects in an informer
CreateEvent EventType = "CREATE"
// UpdateEvent event associated with an object update in an informer
UpdateEvent EventType = "UPDATE"
// DeleteEvent event associated when an object is removed from an informer
DeleteEvent EventType = "DELETE"
)
// Event holds the context of an event.
type Event struct {
Type EventType
Obj interface{}
}
type l map[string]string
func (l l) contains(k, v string) bool {
if l == nil {
return false
}
if val, ok := l[k]; !ok || val != v {
return false
}
return true
}
//KubeClient KubeClient
type KubeClient interface {
GetKubeClient() kubernetes.Interface
UpK8sNode(*client.HostNode) (*v1.Node, error)
DownK8sNode(nodename string) error
GetAllPods() (pods []*v1.Pod, err error)
GetPods(namespace string) (pods []*v1.Pod, err error)
GetPodsBySelector(namespace string, selector labels.Selector) (pods []*v1.Pod, err error)
GetNodeByName(nodename string) (*v1.Node, error)
GetNodes() ([]*v1.Node, error)
GetNode(nodeName string) (*v1.Node, error)
@ -72,32 +99,18 @@ type KubeClient interface {
}
//NewKubeClient NewKubeClient
func NewKubeClient(cfg *conf.Conf) (KubeClient, error) {
config, err := k8sutil.NewRestConfig(cfg.K8SConfPath)
if err != nil {
return nil, err
}
config.QPS = 50
config.Burst = 100
cli, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
func NewKubeClient(cfg *conf.Conf, clientset kubernetes.Interface) (KubeClient, error) {
stop := make(chan struct{})
sharedInformers := informers.NewFilteredSharedInformerFactory(cli, cfg.MinResyncPeriod, v1.NamespaceAll,
func(options *metav1.ListOptions) {
//options.LabelSelector = "creator=Rainbond"
})
sharedInformers.Core().V1().Services().Informer()
sharedInformers := informers.NewSharedInformerFactoryWithOptions(clientset, cfg.MinResyncPeriod)
sharedInformers.Core().V1().Endpoints().Informer()
sharedInformers.Core().V1().Services().Informer()
sharedInformers.Core().V1().ConfigMaps().Informer()
sharedInformers.Core().V1().Nodes().Informer()
sharedInformers.Core().V1().Pods().Informer()
sharedInformers.Start(stop)
return &kubeClient{
kubeclient: cli,
kubeclient: clientset,
stop: stop,
sharedInformers: sharedInformers,
}, nil
@ -107,6 +120,7 @@ type kubeClient struct {
kubeclient kubernetes.Interface
sharedInformers informers.SharedInformerFactory
stop chan struct{}
updateCh *channels.RingChannel
}
func (k *kubeClient) Stop() {
@ -115,11 +129,6 @@ func (k *kubeClient) Stop() {
}
}
//GetKubeClient get kube client
func (k *kubeClient) GetKubeClient() kubernetes.Interface {
return k.kubeclient
}
//GetNodeByName get node
func (k *kubeClient) GetNodeByName(nodename string) (*v1.Node, error) {
return k.sharedInformers.Core().V1().Nodes().Lister().Get(nodename)
@ -453,6 +462,10 @@ func (k *kubeClient) UpK8sNode(rainbondNode *client.HostNode) (*v1.Node, error)
return node, nil
}
func (k *kubeClient) GetPodsBySelector(namespace string, selector labels.Selector) ([]*v1.Pod, error) {
return k.sharedInformers.Core().V1().Pods().Lister().Pods(namespace).List(selector)
}
func (k *kubeClient) GetEndpoints(namespace string, selector labels.Selector) ([]*v1.Endpoints, error) {
return k.sharedInformers.Core().V1().Endpoints().Lister().Endpoints(namespace).List(selector)
}
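These lister-backed helpers serve reads from the shared informer cache instead of hitting the API server on every call. A small hypothetical caller (the label key/value and helper name are examples only):

```go
package example

import (
	"k8s.io/apimachinery/pkg/labels"

	"github.com/goodrain/rainbond/node/kubecache"
)

// countGatewayPods counts cached pods labelled name=rbd-gateway in the namespace.
func countGatewayPods(cli kubecache.KubeClient, namespace string) (int, error) {
	selector := labels.SelectorFromSet(labels.Set{"name": "rbd-gateway"})
	pods, err := cli.GetPodsBySelector(namespace, selector)
	if err != nil {
		return 0, err
	}
	return len(pods), nil
}
```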

View File

@ -90,7 +90,7 @@ func (w *windowsServiceController) StopList(list []*service.Service) error {
func (w *windowsServiceController) RestartService(s *service.Service) error {
if err := windows.RestartService(s.Name); err != nil {
if strings.Contains(err.Error(), "does not exist") {
if err := w.WriteConfig(s); err != nil {
if _, err := w.WriteConfig(s); err != nil {
return fmt.Errorf("ReWrite service config failure %s", err.Error())
}
}

View File

@ -12,8 +12,6 @@ import (
"k8s.io/client-go/kubernetes"
)
var defaultSecretName = "rbd-docker-secret"
var defaultNamespace = "rbd-system"
var defaultFileName = "server.crt"
var defaultFilePath = "/etc/docker/certs.d/goodrain.me"
@ -21,11 +19,8 @@ var defaultFilePath = "/etc/docker/certs.d/goodrain.me"
func SyncDockerCertFromSecret(clientset kubernetes.Interface, namespace, secretName string) error {
namespace = strings.TrimSpace(namespace)
secretName = strings.TrimSpace(secretName)
if namespace == "" {
namespace = defaultNamespace
}
if secretName == "" {
secretName = defaultSecretName
if namespace == "" || secretName == "" {
return nil
}
secretInfo, err := clientset.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{})
if err != nil {
@ -37,7 +32,7 @@ func SyncDockerCertFromSecret(clientset kubernetes.Interface, namespace, secretN
}
} else {
logrus.Warnf("docker secret:%s do not contain cert info", defaultSecretName)
logrus.Warnf("docker secret: %s/%s do not contain cert info", secretName, namespace)
}
return nil
}
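With the hard-coded namespace default removed, both arguments must now be supplied by the caller; an illustrative in-package wrapper (the wrapper itself is not part of this change):

```go
// syncDefaultDockerCert shows the new calling convention: namespace and secret
// name are both explicit, and an empty value makes the sync a no-op.
func syncDefaultDockerCert(clientset kubernetes.Interface) error {
	return SyncDockerCertFromSecret(clientset, "rbd-system", defaultSecretName)
}
```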

View File

@ -28,8 +28,6 @@ import (
"sync/atomic"
"time"
"github.com/goodrain/rainbond/node/kubecache"
"k8s.io/apimachinery/pkg/labels"
"github.com/Sirupsen/logrus"
@ -256,13 +254,13 @@ func (d *DiscoverServerManager) setSnapshot(nc *NodeConfig) error {
}
//CreateDiscoverServerManager create discover server manager
func CreateDiscoverServerManager(client kubecache.KubeClient, conf option.Conf) (*DiscoverServerManager, error) {
func CreateDiscoverServerManager(clientset kubernetes.Interface, conf option.Conf) (*DiscoverServerManager, error) {
configcache := cache.NewSnapshotCache(false, Hasher{}, logrus.WithField("module", "config-cache"))
ctx, cancel := context.WithCancel(context.Background())
dsm := &DiscoverServerManager{
server: server.NewServer(configcache, nil),
cacheManager: configcache,
kubecli: client.GetKubeClient(),
kubecli: clientset,
conf: conf,
eventChan: make(chan *Event, 100),
pool: &sync.Pool{

View File

@ -324,7 +324,7 @@ func (n *NodeManager) getInitLabel(node *client.HostNode) map[string]string {
node.HostName = hostname
}
labels["rainbond_node_hostname"] = node.HostName
labels["rainbond_node_ip"] = node.InternalIP
labels["kubernetes.io/hostname"] = node.InternalIP
return labels
}

View File

@ -9,13 +9,14 @@ if [ $BUILD_IMAGE_BASE_NAME ];
then
IMAGE_BASE_NAME=${BUILD_IMAGE_BASE_NAME}
fi
GO_VERSION=1.11-stretch
GATEWAY_GO_VERSION=1.11-alpine3.8
GO_VERSION=1.13
GATEWAY_GO_VERSION=1.13-alpine
if [ -z "$VERSION" ];then
if [ -z "$TRAVIS_TAG" ]; then
if [ -z "$TRAVIS_BRANCH" ]; then
VERSION=V5.1-dev
VERSION=V5.2-dev
else
VERSION=$TRAVIS_BRANCH-dev
fi
@ -29,6 +30,53 @@ git_commit=$(git log -n 1 --pretty --format=%h)
release_desc=${VERSION}-${git_commit}-${buildTime}
build::node() {
local releasedir=./.release
local distdir=$releasedir/dist/usr/local
[ ! -d "$distdir" ] && mkdir -p $distdir/bin || rm -rf $distdir/bin/*
echo "---> Build Binary For RBD"
echo "rbd plugins version:$release_desc"
case $1 in
node)
echo "build node"
docker run --rm -v `pwd`:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc} -X github.com/goodrain/rainbond/util/license.enterprise=${ENTERPRISE}" -o $releasedir/dist/usr/local/bin/node ./cmd/node
;;
grctl)
echo "build grctl"
docker run --rm -v `pwd`:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o $releasedir/dist/usr/local/bin/grctl ./cmd/grctl
;;
certutil)
echo "build certutil"
docker run --rm -v `pwd`:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o $releasedir/dist/usr/local/bin/grcert ./cmd/certutil
;;
*)
echo "build node"
docker run --rm -v `pwd`:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc} -X github.com/goodrain/rainbond/util/license.enterprise=${ENTERPRISE}" -o $releasedir/dist/usr/local/bin/node ./cmd/node
if [ "${ENTERPRISE}" = "true" ];then
echo "build grctl enterprise"
docker run --rm -v `pwd`:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc} -X github.com/goodrain/rainbond/util/license.enterprise=${ENTERPRISE}" -o $releasedir/dist/usr/local/bin/grctl ./cmd/grctl
else
echo "build grctl"
docker run --rm -v `pwd`:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o $releasedir/dist/usr/local/bin/grctl ./cmd/grctl
fi
echo "build certutil"
docker run --rm -v `pwd`:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o $releasedir/dist/usr/local/bin/grcert ./cmd/certutil
pushd $distdir
tar zcf pkg.tgz `find . -maxdepth 1|sed 1d`
cat >Dockerfile <<EOF
FROM alpine:3.6
COPY pkg.tgz /
EOF
docker build -t ${BASE_NAME}/cni:rbd_$VERSION .
if [ "$1" = "push" ];then
docker push ${BASE_NAME}/cni:rbd_$VERSION
fi
popd
;;
esac
}
build::binary() {
echo "---> build binary:$1"
local OUTPATH=./_output/$GOOS/${BASE_NAME}-$1
@ -58,34 +106,40 @@ build::binary() {
build::image() {
local REPO_PATH="$PWD"
pushd ./hack/contrib/docker/$1
pushd "./hack/contrib/docker/$1"
echo "---> build binary:$1"
local DOCKER_PATH="./hack/contrib/docker/$1"
if [ "$1" = "eventlog" ];then
docker build -t goodraim.me/event-build:v1 build
docker run --rm -v ${REPO_PATH}:${WORK_DIR} -w ${WORK_DIR} goodraim.me/event-build:v1 go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/eventlog
docker run --rm -v "${REPO_PATH}":${WORK_DIR} -w ${WORK_DIR} goodraim.me/event-build:v1 go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/eventlog
elif [ "$1" = "chaos" ];then
docker run --rm -v ${REPO_PATH}:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/builder
docker run --rm -v "${REPO_PATH}":${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/builder
elif [ "$1" = "monitor" ];then
docker run --rm -v ${REPO_PATH}:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -extldflags '-static' -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -tags 'netgo static_build' -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
docker run -e CGO_ENABLED=0 --rm -v "${REPO_PATH}":${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
elif [ "$1" = "gateway" ];then
docker run --rm -v ${REPO_PATH}:${WORK_DIR} -w ${WORK_DIR} -it golang:${GATEWAY_GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
docker run --rm -v "${REPO_PATH}":${WORK_DIR} -w ${WORK_DIR} -it golang:${GATEWAY_GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
elif [ "$1" = "mesh-data-panel" ];then
echo "mesh-data-panel not need build";
else
if [ "${ENTERPRISE}" = "true" ];then
echo "---> ENTERPRISE:${ENTERPRISE}"
docker run --rm -v ${REPO_PATH}:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc} -X github.com/goodrain/rainbond/util/license.enterprise=${ENTERPRISE}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
docker run --rm -v "${REPO_PATH}":${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc} -X github.com/goodrain/rainbond/util/license.enterprise=${ENTERPRISE}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
else
docker run --rm -v ${REPO_PATH}:${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
docker run --rm -v "${REPO_PATH}":${WORK_DIR} -w ${WORK_DIR} -it golang:${GO_VERSION} go build -ldflags "-w -s -X github.com/goodrain/rainbond/cmd.version=${release_desc}" -o ${DOCKER_PATH}/${BASE_NAME}-$1 ./cmd/$1
fi
fi
echo "---> build image:$1"
sed "s/__RELEASE_DESC__/${release_desc}/" Dockerfile > Dockerfile.release
docker build -t ${IMAGE_BASE_NAME}/rbd-$1:${VERSION} -f Dockerfile.release .
docker build -t "${IMAGE_BASE_NAME}/rbd-$1:${VERSION}" -f Dockerfile.release .
if [ "$2" = "push" ];then
docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
docker push ${IMAGE_BASE_NAME}/rbd-$1:${VERSION}
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD"
docker push "${IMAGE_BASE_NAME}/rbd-$1:${VERSION}"
		if [ -n "${DOMESTIC_BASE_NAME}" ];
then
docker tag "${IMAGE_BASE_NAME}/rbd-$1:${VERSION}" "${DOMESTIC_BASE_NAME}/${DOMESTIC_NAMESPACE}/rbd-$1:${VERSION}"
docker login -u "$DOMESTIC_DOCKER_USERNAME" -p "$DOMESTIC_DOCKER_PASSWORD" ${DOMESTIC_BASE_NAME}
docker push "${DOMESTIC_BASE_NAME}/${DOMESTIC_NAMESPACE}/rbd-$1:${VERSION}"
fi
fi
rm -f ./Dockerfile.release
rm -f ./${BASE_NAME}-$1

View File

@ -0,0 +1,6 @@
package constants
const (
// DefImageRepository default private image repository
DefImageRepository = "goodrain.me"
)

View File

@ -6,9 +6,11 @@ import (
"time"
"github.com/Sirupsen/logrus"
rainbondv1alpha1 "github.com/goodrain/rainbond-operator/pkg/apis/rainbond/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
@ -17,6 +19,10 @@ import (
"k8s.io/client-go/tools/reference"
)
func init() {
utilruntime.Must(rainbondv1alpha1.AddToScheme(scheme.Scheme))
}
// NewClientset -
func NewClientset(kubecfg string) (kubernetes.Interface, error) {
c, err := clientcmd.BuildConfigFromFlags("", kubecfg)
@ -50,6 +56,11 @@ func NewRestConfig(kubecfg string) (restConfig *rest.Config, err error) {
return clientcmd.BuildConfigFromFlags("", kubecfg)
}
//NewRestClient new rest client
func NewRestClient(restConfig *rest.Config) (*rest.RESTClient, error) {
return rest.RESTClientFor(restConfig)
}
// InClusterConfig in cluster config
func InClusterConfig() (*rest.Config, error) {
// Work around https://github.com/kubernetes/kubernetes/issues/40973

View File

@ -115,7 +115,7 @@ func (h *HTTPProbe) GetHTTPHealth() map[string]string {
}
logrus.Debugf("http probe check address; %s", address)
resp, err := c.Get(addr.String())
if resp.Body != nil {
if resp != nil && resp.Body != nil {
defer resp.Body.Close()
}
if err != nil {
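The added nil check follows the net/http contract: when Get returns an error the response may be nil, so the body can only be closed after checking both. The same guard in isolation (illustrative helper, not project code):

```go
package example

import "net/http"

// fetchStatus returns the HTTP status code, treating transport errors and a
// nil response as failures without risking a nil-pointer dereference.
func fetchStatus(client *http.Client, url string) (int, error) {
	resp, err := client.Get(url)
	if resp != nil && resp.Body != nil {
		defer resp.Body.Close()
	}
	if err != nil {
		return 0, err
	}
	return resp.StatusCode, nil
}
```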

vendor/github.com/PuerkitoBio/purell/.gitignore generated vendored Normal file
View File

@ -0,0 +1,5 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags

vendor/github.com/PuerkitoBio/purell/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,12 @@
language: go
go:
- 1.4.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- "1.10.x"
- "1.11.x"
- tip

vendor/github.com/PuerkitoBio/purell/LICENSE generated vendored Normal file
View File

@ -0,0 +1,12 @@
Copyright (c) 2012, Martin Angers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/PuerkitoBio/purell/README.md generated vendored Normal file
View File

@ -0,0 +1,188 @@
# Purell
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell)
## Install
`go get github.com/PuerkitoBio/purell`
## Changelog
* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor).
* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
* **v0.2.0** : Add benchmarks, Attempt IDN support.
* **v0.1.0** : Initial release.
## Examples
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
```go
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}
```
## API
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
```go
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
```
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
The [full godoc reference is available on gopkgdoc][godoc].
Some things to note:
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
- %24 -> $
- %26 -> &
- %2B-%3B -> +,-./0123456789:;
- %3D -> =
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
- %5F -> _
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
- %7E -> ~
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
### Safe vs Usually Safe vs Unsafe
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
Consider the following URL:
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
Normalizing with the `FlagsSafe` gives:
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
With the `FlagsUsuallySafeGreedy`:
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
And with `FlagsUnsafeGreedy`:
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
## TODOs
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
## Thanks / Contributions
@rogpeppe
@jehiah
@opennota
@pchristopher1275
@zenovich
@beeker1121
## License
The [BSD 3-Clause license][bsd].
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
[iss7]: https://github.com/PuerkitoBio/purell/issues/7

vendor/github.com/PuerkitoBio/purell/purell.go generated vendored Normal file
View File

@ -0,0 +1,379 @@
/*
Package purell offers URL normalization as described on the wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell
import (
"bytes"
"fmt"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/unicode/norm"
"golang.org/x/text/width"
)
// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
)
// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)
// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
FlagLowercaseScheme,
FlagLowercaseHost,
FlagRemoveDefaultPort,
FlagRemoveDirectoryIndex,
FlagRemoveDotSegments,
FlagRemoveFragment,
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
FlagRemoveDuplicateSlashes,
FlagRemoveWWW,
FlagAddWWW,
FlagSortQuery,
FlagDecodeDWORDHost,
FlagDecodeOctalHost,
FlagDecodeHexHost,
FlagRemoveUnnecessaryHostDots,
FlagRemoveEmptyPortSeparator,
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
FlagAddTrailingSlash,
}
// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
FlagLowercaseScheme: lowercaseScheme,
FlagLowercaseHost: lowercaseHost,
FlagRemoveDefaultPort: removeDefaultPort,
FlagRemoveDirectoryIndex: removeDirectoryIndex,
FlagRemoveDotSegments: removeDotSegments,
FlagRemoveFragment: removeFragment,
FlagForceHTTP: forceHTTP,
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
FlagRemoveWWW: removeWWW,
FlagAddWWW: addWWW,
FlagSortQuery: sortQuery,
FlagDecodeDWORDHost: decodeDWORDHost,
FlagDecodeOctalHost: decodeOctalHost,
FlagDecodeHexHost: decodeHexHost,
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
FlagRemoveTrailingSlash: removeTrailingSlash,
FlagAddTrailingSlash: addTrailingSlash,
}
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes an URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
result, e := NormalizeURLString(u, f)
if e != nil {
panic(e)
}
return result
}
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
parsed, err := url.Parse(u)
if err != nil {
return "", err
}
if f&FlagLowercaseHost == FlagLowercaseHost {
parsed.Host = strings.ToLower(parsed.Host)
}
// The idna package doesn't fully conform to RFC 5895
// (https://tools.ietf.org/html/rfc5895), so we do it here.
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
// TODO: Remove when (if?) idna package conforms to RFC 5895.
parsed.Host = width.Fold.String(parsed.Host)
parsed.Host = norm.NFC.String(parsed.Host)
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
return "", err
}
return NormalizeURL(parsed, f), nil
}
// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
for _, k := range flagsOrder {
if f&k == k {
flags[k](u)
}
}
return urlesc.Escape(u)
}
func lowercaseScheme(u *url.URL) {
if len(u.Scheme) > 0 {
u.Scheme = strings.ToLower(u.Scheme)
}
}
func lowercaseHost(u *url.URL) {
if len(u.Host) > 0 {
u.Host = strings.ToLower(u.Host)
}
}
func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
return ""
}
return val
})
}
}
func removeTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if strings.HasSuffix(u.Path, "/") {
u.Path = u.Path[:l-1]
}
} else if l = len(u.Host); l > 0 {
if strings.HasSuffix(u.Host, "/") {
u.Host = u.Host[:l-1]
}
}
}
func addTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
} else if l = len(u.Host); l > 0 {
if !strings.HasSuffix(u.Host, "/") {
u.Host += "/"
}
}
}
func removeDotSegments(u *url.URL) {
if len(u.Path) > 0 {
var dotFree []string
var lastIsDot bool
sections := strings.Split(u.Path, "/")
for _, s := range sections {
if s == ".." {
if len(dotFree) > 0 {
dotFree = dotFree[:len(dotFree)-1]
}
} else if s != "." {
dotFree = append(dotFree, s)
}
lastIsDot = (s == "." || s == "..")
}
// Special case if host does not end with / and new path does not begin with /
u.Path = strings.Join(dotFree, "/")
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
// Special case if the last segment was a dot, make sure the path ends with a slash
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
}
}
func removeDirectoryIndex(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
}
}
func removeFragment(u *url.URL) {
u.Fragment = ""
}
func forceHTTP(u *url.URL) {
if strings.ToLower(u.Scheme) == "https" {
u.Scheme = "http"
}
}
func removeDuplicateSlashes(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
}
}
func removeWWW(u *url.URL) {
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = u.Host[4:]
}
}
func addWWW(u *url.URL) {
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = "www." + u.Host
}
}
func sortQuery(u *url.URL) {
q := u.Query()
if len(q) > 0 {
arKeys := make([]string, len(q))
i := 0
for k := range q {
arKeys[i] = k
i++
}
sort.Strings(arKeys)
buf := new(bytes.Buffer)
for _, k := range arKeys {
sort.Strings(q[k])
for _, v := range q[k] {
if buf.Len() > 0 {
buf.WriteRune('&')
}
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
}
}
// Rebuild the raw query string
u.RawQuery = buf.String()
}
}
func decodeDWORDHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
var parts [4]int64
dword, _ := strconv.ParseInt(matches[1], 10, 0)
for i, shift := range []uint{24, 16, 8, 0} {
parts[i] = dword >> shift & 0xFF
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
}
}
}
func decodeOctalHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
var parts [4]int64
for i := 1; i <= 4; i++ {
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
}
}
}
func decodeHexHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
// Conversion is safe because of regex validation
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
// Set host as DWORD (base 10) encoded host
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
// The rest is the same as decoding a DWORD host
decodeDWORDHost(u)
}
}
}
func removeUnncessaryHostDots(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
// Trim the leading and trailing dots
u.Host = strings.Trim(matches[1], ".")
if len(matches) > 2 {
u.Host += matches[2]
}
}
}
}
func removeEmptyPortSeparator(u *url.URL) {
if len(u.Host) > 0 {
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
}
}
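
To make the flag sets above concrete, here is a minimal sketch of calling the exported API (assuming the vendored import path github.com/PuerkitoBio/purell; the input URL and the commented output are illustrative):

package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// FlagsUsuallySafeGreedy lowercases the scheme and host, drops the
	// default port, resolves dot segments and removes the trailing slash.
	normalized, err := purell.NormalizeURLString(
		"HTTP://Example.COM:80/a/b/../c/",
		purell.FlagsUsuallySafeGreedy|purell.FlagSortQuery,
	)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(normalized) // expected: http://example.com/a/c
}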

15
vendor/github.com/PuerkitoBio/urlesc/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,15 @@
language: go
go:
- 1.4.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- tip
install:
- go build .
script:
- go test -v

27
vendor/github.com/PuerkitoBio/urlesc/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

16
vendor/github.com/PuerkitoBio/urlesc/README.md generated vendored Normal file
View File

@ -0,0 +1,16 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.
It contains some parts of the net/url package, modified so as to allow
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
## Install
go get github.com/PuerkitoBio/urlesc
## License
Go license (BSD-3-Clause)

180
vendor/github.com/PuerkitoBio/urlesc/urlesc.go generated vendored Normal file
View File

@ -0,0 +1,180 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to allow
// some reserved characters incorrectly escaped by net/url.
// See https://github.com/golang/go/issues/5684
package urlesc
import (
"bytes"
"net/url"
"strings"
)
type encoding int
const (
encodePath encoding = 1 + iota
encodeUserPassword
encodeQueryComponent
encodeFragment
)
// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
// §2.3 Unreserved characters (alphanum)
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
return false
// §2.2 Reserved characters (reserved)
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
// Different sections of the URL allow a few of
// the reserved characters to appear unescaped.
switch mode {
case encodePath: // §3.3
// The RFC allows sub-delims and : @.
// '/', '[' and ']' can be used to assign meaning to individual path
// segments. This package only manipulates the path as a whole,
// so we allow those as well. That leaves only ? and # to escape.
return c == '?' || c == '#'
case encodeUserPassword: // §3.2.1
// The RFC allows : and sub-delims in
// userinfo. The parsing of userinfo treats ':' as special so we must escape
// all the gen-delims.
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
case encodeQueryComponent: // §3.4
// The RFC allows / and ?.
return c != '/' && c != '?'
case encodeFragment: // §4.1
// The RFC text is silent but the grammar allows
// everything, so escape nothing but #
return c == '#'
}
}
// Everything else must be escaped.
return true
}
// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
return escape(s, encodeQueryComponent)
}
func escape(s string, mode encoding) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c, mode) {
if c == ' ' && mode == encodeQueryComponent {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ' && mode == encodeQueryComponent:
t[j] = '+'
j++
case shouldEscape(c, mode):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
var uiReplacer = strings.NewReplacer(
"%21", "!",
"%27", "'",
"%28", "(",
"%29", ")",
"%2A", "*",
)
// unescapeUserinfo unescapes some characters that need not be escaped as per RFC 3986.
func unescapeUserinfo(s string) string {
return uiReplacer.Replace(s)
}
// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
// scheme:opaque
// scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
// - if u.Scheme is empty, scheme: is omitted.
// - if u.User is nil, userinfo@ is omitted.
// - if u.Host is empty, host/ is omitted.
// - if u.Scheme and u.Host are empty and u.User is nil,
// the entire scheme://userinfo@host/ is omitted.
// - if u.Host is non-empty and u.Path begins with a /,
// the form host/path does not add its own /.
// - if u.RawQuery is empty, ?query is omitted.
// - if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
var buf bytes.Buffer
if u.Scheme != "" {
buf.WriteString(u.Scheme)
buf.WriteByte(':')
}
if u.Opaque != "" {
buf.WriteString(u.Opaque)
} else {
if u.Scheme != "" || u.Host != "" || u.User != nil {
buf.WriteString("//")
if ui := u.User; ui != nil {
buf.WriteString(unescapeUserinfo(ui.String()))
buf.WriteByte('@')
}
if h := u.Host; h != "" {
buf.WriteString(h)
}
}
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
buf.WriteByte('/')
}
buf.WriteString(escape(u.Path, encodePath))
}
if u.RawQuery != "" {
buf.WriteByte('?')
buf.WriteString(u.RawQuery)
}
if u.Fragment != "" {
buf.WriteByte('#')
buf.WriteString(escape(u.Fragment, encodeFragment))
}
return buf.String()
}
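
As a quick illustration of the two exported helpers above, a small sketch (assuming the vendored import path github.com/PuerkitoBio/urlesc; the example URLs are illustrative):

package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// QueryEscape percent-encodes everything outside the unreserved set and
	// turns spaces into '+', as appropriate for a query component.
	fmt.Println(urlesc.QueryEscape("a b&c=d")) // a+b%26c%3Dd

	// Escape reassembles a parsed URL; RFC 3986 sub-delims such as '(' and
	// ')' are left unescaped in the path component.
	u, err := url.Parse("http://example.com/a(b)/c?q=1")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u)) // http://example.com/a(b)/c?q=1
}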

66
vendor/github.com/docker/docker/pkg/term/ascii.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
package term
import (
"fmt"
"strings"
)
// ASCII lists the supported ASCII key sequences
var ASCII = []string{
"ctrl-@",
"ctrl-a",
"ctrl-b",
"ctrl-c",
"ctrl-d",
"ctrl-e",
"ctrl-f",
"ctrl-g",
"ctrl-h",
"ctrl-i",
"ctrl-j",
"ctrl-k",
"ctrl-l",
"ctrl-m",
"ctrl-n",
"ctrl-o",
"ctrl-p",
"ctrl-q",
"ctrl-r",
"ctrl-s",
"ctrl-t",
"ctrl-u",
"ctrl-v",
"ctrl-w",
"ctrl-x",
"ctrl-y",
"ctrl-z",
"ctrl-[",
"ctrl-\\",
"ctrl-]",
"ctrl-^",
"ctrl-_",
}
// ToBytes converts a comma-separated list of key sequences (e.g. "ctrl-a,a") into the corresponding ASCII byte codes.
func ToBytes(keys string) ([]byte, error) {
codes := []byte{}
next:
for _, key := range strings.Split(keys, ",") {
if len(key) != 1 {
for code, ctrl := range ASCII {
if ctrl == key {
codes = append(codes, byte(code))
continue next
}
}
if key == "DEL" {
codes = append(codes, 127)
} else {
return nil, fmt.Errorf("Unknown character: '%s'", key)
}
} else {
codes = append(codes, byte(key[0]))
}
}
return codes, nil
}

43
vendor/github.com/docker/docker/pkg/term/ascii_test.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package term
import "testing"
func TestToBytes(t *testing.T) {
codes, err := ToBytes("ctrl-a,a")
if err != nil {
t.Fatal(err)
}
if len(codes) != 2 {
t.Fatalf("Expected 2 codes, got %d", len(codes))
}
if codes[0] != 1 || codes[1] != 97 {
t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1])
}
codes, err = ToBytes("shift-z")
if err == nil {
t.Fatalf("Expected error, got none")
}
codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o")
if err != nil {
t.Fatal(err)
}
if len(codes) != 4 {
t.Fatalf("Expected 4 codes, got %d", len(codes))
}
if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 {
t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3])
}
codes, err = ToBytes("DEL,+")
if err != nil {
t.Fatal(err)
}
if len(codes) != 2 {
t.Fatalf("Expected 2 codes, got %d", len(codes))
}
if codes[0] != 127 || codes[1] != 43 {
t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1])
}
}

View File

@ -0,0 +1,50 @@
// +build linux,cgo
package term
import (
"syscall"
"unsafe"
)
// #include <termios.h>
import "C"
// Termios is the Unix API for terminal I/O.
// It is passthrough for syscall.Termios in order to make it portable with
// other platforms where it is not available or handled differently.
type Termios syscall.Termios
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if err := tcget(fd, &oldState.termios); err != 0 {
return nil, err
}
newState := oldState.termios
C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
if err := tcset(fd, &newState); err != 0 {
return nil, err
}
return &oldState, nil
}
func tcget(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}

20
vendor/github.com/docker/docker/pkg/term/tc_other.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// +build !windows
// +build !linux !cgo
// +build !solaris !cgo
package term
import (
"syscall"
"unsafe"
)
func tcget(fd uintptr, p *Termios) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
return err
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
return err
}

View File

@ -0,0 +1,63 @@
// +build solaris,cgo
package term
import (
"syscall"
"unsafe"
)
// #include <termios.h>
import "C"
// Termios is the Unix API for terminal I/O.
// It is passthrough for syscall.Termios in order to make it portable with
// other platforms where it is not available or handled differently.
type Termios syscall.Termios
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if err := tcget(fd, &oldState.termios); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY)
newState.Oflag &^= syscall.OPOST
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
newState.Cflag |= syscall.CS8
/*
VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned
Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It
needs to be explicitly set to 1.
*/
newState.Cc[C.VMIN] = 1
newState.Cc[C.VTIME] = 0
if err := tcset(fd, &newState); err != 0 {
return nil, err
}
return &oldState, nil
}
func tcget(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}
func tcset(fd uintptr, p *Termios) syscall.Errno {
ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
if ret != 0 {
return err.(syscall.Errno)
}
return 0
}

123
vendor/github.com/docker/docker/pkg/term/term.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
// +build !windows
// Package term provides structures and helper functions to work with
// terminal (state, sizes).
package term
import (
"errors"
"fmt"
"io"
"os"
"os/signal"
"syscall"
)
var (
// ErrInvalidState is returned if the state of the terminal is invalid.
ErrInvalidState = errors.New("Invalid terminal state")
)
// State represents the state of the terminal.
type State struct {
termios Termios
}
// Winsize represents the size of the terminal window.
type Winsize struct {
Height uint16
Width uint16
x uint16
y uint16
}
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
return os.Stdin, os.Stdout, os.Stderr
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
var inFd uintptr
var isTerminalIn bool
if file, ok := in.(*os.File); ok {
inFd = file.Fd()
isTerminalIn = IsTerminal(inFd)
}
return inFd, isTerminalIn
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
var termios Termios
return tcget(fd, &termios) == 0
}
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
if state == nil {
return ErrInvalidState
}
if err := tcset(fd, &state.termios); err != 0 {
return err
}
return nil
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
var oldState State
if err := tcget(fd, &oldState.termios); err != 0 {
return nil, err
}
return &oldState, nil
}
// DisableEcho applies the specified state to the terminal connected to the file
// descriptor, with echo disabled.
func DisableEcho(fd uintptr, state *State) error {
newState := state.termios
newState.Lflag &^= syscall.ECHO
if err := tcset(fd, &newState); err != 0 {
return err
}
handleInterrupt(fd, state)
return nil
}
// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
oldState, err := MakeRaw(fd)
if err != nil {
return nil, err
}
handleInterrupt(fd, oldState)
return oldState, err
}
// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
return nil, nil
}
func handleInterrupt(fd uintptr, state *State) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, os.Interrupt)
go func() {
for range sigchan {
// quit cleanly and the new terminal item is on a new line
fmt.Println()
signal.Stop(sigchan)
close(sigchan)
RestoreTerminal(fd, state)
os.Exit(1)
}
}()
}
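
A minimal sketch of how a caller might drive this package on a UNIX terminal (assuming the vendored import path github.com/docker/docker/pkg/term; the program and its output formatting are illustrative):

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}
	// SetRawTerminal switches the terminal into raw mode and installs an
	// interrupt handler that restores the previous state on Ctrl-C.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer term.RestoreTerminal(fd, state)

	// With OPOST disabled in raw mode, emit an explicit CR+LF.
	if ws, err := term.GetWinsize(fd); err == nil {
		fmt.Printf("terminal size: %dx%d\r\n", ws.Width, ws.Height)
	}
}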

View File

@ -0,0 +1,41 @@
// +build solaris
package term
import (
"syscall"
"unsafe"
)
/*
#include <unistd.h>
#include <stropts.h>
#include <termios.h>
// Small wrapper to get rid of variadic args of ioctl()
int my_ioctl(int fd, int cmd, struct winsize *ws) {
return ioctl(fd, cmd, ws);
}
*/
import "C"
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
// Skip retval = 0
if ret == 0 {
return ws, nil
}
return ws, err
}
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
// Skip retval = 0
if ret == 0 {
return nil
}
return err
}

29
vendor/github.com/docker/docker/pkg/term/term_unix.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
// +build !solaris,!windows
package term
import (
"syscall"
"unsafe"
)
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
ws := &Winsize{}
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
// Skip errno = 0
if err == 0 {
return ws, nil
}
return ws, err
}
// SetWinsize tries to set the specified window size for the specified file descriptor.
func SetWinsize(fd uintptr, ws *Winsize) error {
_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
// Skip errno = 0
if err == 0 {
return nil
}
return err
}

View File

@ -0,0 +1,233 @@
// +build windows
package term
import (
"io"
"os"
"os/signal"
"syscall"
"github.com/Azure/go-ansiterm/winterm"
"github.com/docker/docker/pkg/term/windows"
)
// State holds the console mode for the terminal.
type State struct {
mode uint32
}
// Winsize is used for window size.
type Winsize struct {
Height uint16
Width uint16
}
const (
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
enableVirtualTerminalInput = 0x0200
enableVirtualTerminalProcessing = 0x0004
disableNewlineAutoReturn = 0x0008
)
// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
var vtInputSupported bool
// StdStreams returns the standard streams (stdin, stdout, stderr).
func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
// Turn on VT handling on all std handles, if possible. This might
// fail, in which case we will fall back to terminal emulation.
var emulateStdin, emulateStdout, emulateStderr bool
fd := os.Stdin.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate that enableVirtualTerminalInput is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
emulateStdin = true
} else {
vtInputSupported = true
}
// Unconditionally set the console mode back even on failure because SetConsoleMode
// remembers invalid bits on input handles.
winterm.SetConsoleMode(fd, mode)
}
fd = os.Stdout.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate disableNewlineAutoReturn is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
emulateStdout = true
} else {
winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
}
}
fd = os.Stderr.Fd()
if mode, err := winterm.GetConsoleMode(fd); err == nil {
// Validate disableNewlineAutoReturn is supported, but do not set it.
if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
emulateStderr = true
} else {
winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
}
}
if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" {
// The ConEmu and ConsoleZ terminals emulate ANSI on output streams well.
emulateStdin = true
emulateStdout = false
emulateStderr = false
}
if emulateStdin {
stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE)
} else {
stdIn = os.Stdin
}
if emulateStdout {
stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
} else {
stdOut = os.Stdout
}
if emulateStderr {
stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
} else {
stdErr = os.Stderr
}
return
}
// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
func GetFdInfo(in interface{}) (uintptr, bool) {
return windows.GetHandleInfo(in)
}
// GetWinsize returns the window size based on the specified file descriptor.
func GetWinsize(fd uintptr) (*Winsize, error) {
info, err := winterm.GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil, err
}
winsize := &Winsize{
Width: uint16(info.Window.Right - info.Window.Left + 1),
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
}
return winsize, nil
}
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
return windows.IsConsole(fd)
}
// RestoreTerminal restores the terminal connected to the given file descriptor
// to a previous state.
func RestoreTerminal(fd uintptr, state *State) error {
return winterm.SetConsoleMode(fd, state.mode)
}
// SaveState saves the state of the terminal connected to the given file descriptor.
func SaveState(fd uintptr) (*State, error) {
mode, e := winterm.GetConsoleMode(fd)
if e != nil {
return nil, e
}
return &State{mode: mode}, nil
}
// DisableEcho disables echo for the terminal connected to the given file descriptor.
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
func DisableEcho(fd uintptr, state *State) error {
mode := state.mode
mode &^= winterm.ENABLE_ECHO_INPUT
mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
err := winterm.SetConsoleMode(fd, mode)
if err != nil {
return err
}
// Register an interrupt handler to catch and restore prior state
restoreAtInterrupt(fd, state)
return nil
}
// SetRawTerminal puts the terminal connected to the given file descriptor into
// raw mode and returns the previous state. On UNIX, this puts both the input
// and output into raw mode. On Windows, it only puts the input into raw mode.
func SetRawTerminal(fd uintptr) (*State, error) {
state, err := MakeRaw(fd)
if err != nil {
return nil, err
}
// Register an interrupt handler to catch and restore prior state
restoreAtInterrupt(fd, state)
return state, err
}
// SetRawTerminalOutput puts the output of terminal connected to the given file
// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
// state. On Windows, it disables LF -> CRLF translation.
func SetRawTerminalOutput(fd uintptr) (*State, error) {
state, err := SaveState(fd)
if err != nil {
return nil, err
}
// Ignore failures, since disableNewlineAutoReturn might not be supported on this
// version of Windows.
winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
return state, err
}
// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be restored.
func MakeRaw(fd uintptr) (*State, error) {
state, err := SaveState(fd)
if err != nil {
return nil, err
}
mode := state.mode
// See
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
// Disable these modes
mode &^= winterm.ENABLE_ECHO_INPUT
mode &^= winterm.ENABLE_LINE_INPUT
mode &^= winterm.ENABLE_MOUSE_INPUT
mode &^= winterm.ENABLE_WINDOW_INPUT
mode &^= winterm.ENABLE_PROCESSED_INPUT
// Enable these modes
mode |= winterm.ENABLE_EXTENDED_FLAGS
mode |= winterm.ENABLE_INSERT_MODE
mode |= winterm.ENABLE_QUICK_EDIT_MODE
if vtInputSupported {
mode |= enableVirtualTerminalInput
}
err = winterm.SetConsoleMode(fd, mode)
if err != nil {
return nil, err
}
return state, nil
}
func restoreAtInterrupt(fd uintptr, state *State) {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, os.Interrupt)
go func() {
_ = <-sigchan
RestoreTerminal(fd, state)
os.Exit(0)
}()
}

View File

@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint64
Oflag uint64
Cflag uint64
Lflag uint64
Cc [20]byte
Ispeed uint64
Ospeed uint64
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@ -0,0 +1,47 @@
// +build !cgo
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TCGETS
setTermios = syscall.TCSETS
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
newState.Oflag &^= syscall.OPOST
newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
newState.Cflag |= syscall.CS8
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@ -0,0 +1,69 @@
package term
import (
"syscall"
"unsafe"
)
const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
)
// Termios magic numbers, passthrough to the ones defined in syscall.
const (
IGNBRK = syscall.IGNBRK
PARMRK = syscall.PARMRK
INLCR = syscall.INLCR
IGNCR = syscall.IGNCR
ECHONL = syscall.ECHONL
CSIZE = syscall.CSIZE
ICRNL = syscall.ICRNL
ISTRIP = syscall.ISTRIP
PARENB = syscall.PARENB
ECHO = syscall.ECHO
ICANON = syscall.ICANON
ISIG = syscall.ISIG
IXON = syscall.IXON
BRKINT = syscall.BRKINT
INPCK = syscall.INPCK
OPOST = syscall.OPOST
CS8 = syscall.CS8
IEXTEN = syscall.IEXTEN
)
// Termios is the Unix API for terminal I/O.
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [20]byte
Ispeed uint32
Ospeed uint32
}
// MakeRaw puts the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd uintptr) (*State, error) {
var oldState State
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
return nil, err
}
newState := oldState.termios
newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
newState.Oflag &^= OPOST
newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
newState.Cflag &^= (CSIZE | PARENB)
newState.Cflag |= CS8
newState.Cc[syscall.VMIN] = 1
newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
}
return &oldState, nil
}

View File

@ -0,0 +1,263 @@
// +build windows
package windows
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
"unsafe"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Azure/go-ansiterm/winterm"
)
const (
escapeSequence = ansiterm.KEY_ESC_CSI
)
// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
type ansiReader struct {
file *os.File
fd uintptr
buffer []byte
cbBuffer int
command []byte
}
// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
// Windows console input handle.
func NewAnsiReader(nFile int) io.ReadCloser {
initLogger()
file, fd := winterm.GetStdFile(nFile)
return &ansiReader{
file: file,
fd: fd,
command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
buffer: make([]byte, 0),
}
}
// Close closes the wrapped file.
func (ar *ansiReader) Close() (err error) {
return ar.file.Close()
}
// Fd returns the file descriptor of the wrapped file.
func (ar *ansiReader) Fd() uintptr {
return ar.fd
}
// Read reads up to len(p) bytes of translated input events into p.
func (ar *ansiReader) Read(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
// Previously read bytes exist, read as much as we can and return
if len(ar.buffer) > 0 {
logger.Debugf("Reading previously cached bytes")
originalLength := len(ar.buffer)
copiedLength := copy(p, ar.buffer)
if copiedLength == originalLength {
ar.buffer = make([]byte, 0, len(p))
} else {
ar.buffer = ar.buffer[copiedLength:]
}
logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
return copiedLength, nil
}
// Read and translate key events
events, err := readInputEvents(ar.fd, len(p))
if err != nil {
return 0, err
} else if len(events) == 0 {
logger.Debug("No input events detected")
return 0, nil
}
keyBytes := translateKeyEvents(events, []byte(escapeSequence))
// Save excess bytes and right-size keyBytes
if len(keyBytes) > len(p) {
logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
ar.buffer = keyBytes[len(p):]
keyBytes = keyBytes[:len(p)]
} else if len(keyBytes) == 0 {
logger.Debug("No key bytes returned from the translator")
return 0, nil
}
copiedLength := copy(p, keyBytes)
if copiedLength != len(keyBytes) {
return 0, errors.New("unexpected copy length encountered")
}
logger.Debugf("Read p[%d]: % x", copiedLength, p)
logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
return copiedLength, nil
}
// readInputEvents polls until at least one event is available.
func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
// Determine the maximum number of records to retrieve
// -- Cast around the type system to obtain the size of a single INPUT_RECORD.
// unsafe.Sizeof requires an expression vs. a type-reference; the casting
// tricks the type system into believing it has such an expression.
recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
countRecords := maxBytes / recordSize
if countRecords > ansiterm.MAX_INPUT_EVENTS {
countRecords = ansiterm.MAX_INPUT_EVENTS
} else if countRecords == 0 {
countRecords = 1
}
logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
// Wait for and read input events
events := make([]winterm.INPUT_RECORD, countRecords)
nEvents := uint32(0)
eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
if err != nil {
return nil, err
}
if eventsExist {
err = winterm.ReadConsoleInput(fd, events, &nEvents)
if err != nil {
return nil, err
}
}
// Return a slice restricted to the number of returned records
logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
return events[:nEvents], nil
}
// KeyEvent Translation Helpers
var arrowKeyMapPrefix = map[uint16]string{
winterm.VK_UP: "%s%sA",
winterm.VK_DOWN: "%s%sB",
winterm.VK_RIGHT: "%s%sC",
winterm.VK_LEFT: "%s%sD",
}
var keyMapPrefix = map[uint16]string{
winterm.VK_UP: "\x1B[%sA",
winterm.VK_DOWN: "\x1B[%sB",
winterm.VK_RIGHT: "\x1B[%sC",
winterm.VK_LEFT: "\x1B[%sD",
winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1
winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4
winterm.VK_INSERT: "\x1B[2%s~",
winterm.VK_DELETE: "\x1B[3%s~",
winterm.VK_PRIOR: "\x1B[5%s~",
winterm.VK_NEXT: "\x1B[6%s~",
winterm.VK_F1: "",
winterm.VK_F2: "",
winterm.VK_F3: "\x1B[13%s~",
winterm.VK_F4: "\x1B[14%s~",
winterm.VK_F5: "\x1B[15%s~",
winterm.VK_F6: "\x1B[17%s~",
winterm.VK_F7: "\x1B[18%s~",
winterm.VK_F8: "\x1B[19%s~",
winterm.VK_F9: "\x1B[20%s~",
winterm.VK_F10: "\x1B[21%s~",
winterm.VK_F11: "\x1B[23%s~",
winterm.VK_F12: "\x1B[24%s~",
}
// translateKeyEvents converts the input events into the appropriate ANSI string.
func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
var buffer bytes.Buffer
for _, event := range events {
if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
}
}
return buffer.Bytes()
}
// keyToString maps the given input event record to the corresponding string.
func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
if keyEvent.UnicodeChar == 0 {
return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
}
_, alt, control := getControlKeys(keyEvent.ControlKeyState)
if control {
// TODO(azlinux): Implement following control sequences
// <Ctrl>-D Signals the end of input from the keyboard; also exits current shell.
// <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key.
// <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-s.
// <Ctrl>-S Suspends printing on the screen (does not stop the program).
// <Ctrl>-U Deletes all characters on the current line. Also called the KILL key.
// <Ctrl>-E Quits current command and creates a core
}
// <Alt>+Key generates ESC N Key
if !control && alt {
return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
}
return string(keyEvent.UnicodeChar)
}
// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
shift, alt, control := getControlKeys(controlState)
modifier := getControlKeysModifier(shift, alt, control)
if format, ok := arrowKeyMapPrefix[key]; ok {
return fmt.Sprintf(format, escapeSequence, modifier)
}
if format, ok := keyMapPrefix[key]; ok {
return fmt.Sprintf(format, modifier)
}
return ""
}
// getControlKeys extracts the shift, alt, and ctrl key states.
func getControlKeys(controlState uint32) (shift, alt, control bool) {
shift = 0 != (controlState & winterm.SHIFT_PRESSED)
alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
return shift, alt, control
}
// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
func getControlKeysModifier(shift, alt, control bool) string {
if shift && alt && control {
return ansiterm.KEY_CONTROL_PARAM_8
}
if alt && control {
return ansiterm.KEY_CONTROL_PARAM_7
}
if shift && control {
return ansiterm.KEY_CONTROL_PARAM_6
}
if control {
return ansiterm.KEY_CONTROL_PARAM_5
}
if shift && alt {
return ansiterm.KEY_CONTROL_PARAM_4
}
if alt {
return ansiterm.KEY_CONTROL_PARAM_3
}
if shift {
return ansiterm.KEY_CONTROL_PARAM_2
}
return ""
}

View File

@ -0,0 +1,64 @@
// +build windows
package windows
import (
"io"
"os"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Azure/go-ansiterm/winterm"
)
// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
type ansiWriter struct {
file *os.File
fd uintptr
infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO
command []byte
escapeSequence []byte
inAnsiSequence bool
parser *ansiterm.AnsiParser
}
// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
// Windows console output handle.
func NewAnsiWriter(nFile int) io.Writer {
initLogger()
file, fd := winterm.GetStdFile(nFile)
info, err := winterm.GetConsoleScreenBufferInfo(fd)
if err != nil {
return nil
}
parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
logger.Infof("newAnsiWriter: parser %p", parser)
aw := &ansiWriter{
file: file,
fd: fd,
infoReset: info,
command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
parser: parser,
}
logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
logger.Infof("newAnsiWriter: %v", aw)
return aw
}
func (aw *ansiWriter) Fd() uintptr {
return aw.fd
}
// Write writes len(p) bytes from p to the underlying data stream.
func (aw *ansiWriter) Write(p []byte) (total int, err error) {
if len(p) == 0 {
return 0, nil
}
logger.Infof("Write: % x", p)
logger.Infof("Write: %s", string(p))
return aw.parser.Parse(p)
}

View File

@ -0,0 +1,35 @@
// +build windows
package windows
import (
"os"
"github.com/Azure/go-ansiterm/winterm"
)
// GetHandleInfo returns file descriptor and bool indicating whether the file is a console.
func GetHandleInfo(in interface{}) (uintptr, bool) {
switch t := in.(type) {
case *ansiReader:
return t.Fd(), true
case *ansiWriter:
return t.Fd(), true
}
var inFd uintptr
var isTerminal bool
if file, ok := in.(*os.File); ok {
inFd = file.Fd()
isTerminal = IsConsole(inFd)
}
return inFd, isTerminal
}
// IsConsole returns true if the given file descriptor is a Windows Console.
// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
func IsConsole(fd uintptr) bool {
_, e := winterm.GetConsoleMode(fd)
return e == nil
}

View File

@ -0,0 +1,33 @@
// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
package windows
import (
"io/ioutil"
"os"
"sync"
ansiterm "github.com/Azure/go-ansiterm"
"github.com/Sirupsen/logrus"
)
var logger *logrus.Logger
var initOnce sync.Once
func initLogger() {
initOnce.Do(func() {
logFile := ioutil.Discard
if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
logFile, _ = os.Create("ansiReaderWriter.log")
}
logger = &logrus.Logger{
Out: logFile,
Formatter: new(logrus.TextFormatter),
Level: logrus.DebugLevel,
}
})
}

View File

@ -0,0 +1,3 @@
// This file is necessary to pass the Docker tests.
package windows

13
vendor/github.com/docker/spdystream/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,13 @@
# Contributing to SpdyStream
Want to hack on spdystream? Awesome! Here are instructions to get you
started.
SpdyStream is a part of the [Docker](https://docker.io) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read
[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
Happy hacking!

191
vendor/github.com/docker/spdystream/LICENSE generated vendored Normal file
View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/docker/spdystream/LICENSE.docs generated vendored Normal file
@@ -0,0 +1,425 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public licenses.
Notwithstanding, Creative Commons may elect to apply one of its public
licenses to material it publishes and in those instances will be
considered the "Licensor." Except for the limited purpose of indicating
that material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the public
licenses.
Creative Commons may be contacted at creativecommons.org.

vendor/github.com/docker/spdystream/MAINTAINERS generated vendored Normal file
@@ -0,0 +1,28 @@
# Spdystream maintainers file
#
# This file describes who runs the docker/spdystream project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"dmcgowan",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@docker.com"
GitHub = "dmcgowan"

Some files were not shown because too many files have changed in this diff.