Merge branch 'master' of github.com:goodrain/rainbond
This commit is contained in:
commit 2145f90586

Makefile (1)
@@ -112,6 +112,7 @@ run-node:build-node
	--run-mode=master --kube-conf=`pwd`/test/admin.kubeconfig \
	--nodeid-file=`pwd`/test/host_id.conf \
	--static-task-path=`pwd`/test/tasks \
	--statsd.mapping-config=`pwd`/test/mapper.yml \
	--log-level=debug

doc: ## build the docs
@@ -0,0 +1,19 @@
# Component Description

## API

## BUILDER

## ENTRANCE

## EVENTLOG

## GRCTL

## MQ

## NODE

## WEBCLI

## WORKER
@@ -107,6 +107,8 @@ func (s *LogServer) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&s.Conf.EventStore.DB.HomePath, "docker.log.homepath", "/grdata/logs/", "container log persistent home path")
	fs.StringVar(&s.Conf.WebHook.ConsoleURL, "webhook.console.url", "http://console.goodrain.me", "console web api url")
	fs.StringVar(&s.Conf.WebHook.ConsoleToken, "webhook.console.token", "", "console web api token")
	fs.StringVar(&s.Conf.Entry.NewMonitorMessageServerConf.ListenerHost, "monitor.udp.host", "0.0.0.0", "receive new monitor udp server host")
	fs.IntVar(&s.Conf.Entry.NewMonitorMessageServerConf.ListenerPort, "monitor.udp.port", 6166, "receive new monitor udp server port")
}

//InitLog initializes the logger
@@ -227,6 +229,16 @@ func (s *LogServer) Run() error {
	}
	defer grpckeepalive.Stop()

	udpkeepalive, err := discover.CreateKeepAlive(s.Conf.Cluster.Discover.EtcdAddr, "event_log_event_udp",
		s.Conf.Cluster.Discover.InstanceIP, s.Conf.Cluster.Discover.InstanceIP, s.Conf.Entry.NewMonitorMessageServerConf.ListenerPort)
	if err != nil {
		return err
	}
	if err := udpkeepalive.Start(); err != nil {
		return err
	}
	defer udpkeepalive.Stop()

	httpkeepalive, err := discover.CreateKeepAlive(s.Conf.Cluster.Discover.EtcdAddr, "event_log_event_http",
		s.Conf.Cluster.Discover.InstanceIP, s.Conf.Cluster.Discover.InstanceIP, s.Conf.WebSocket.BindPort)
	if err != nil {
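Note: the Run hunk above repeats one registration step per public listener (gRPC, the new monitor UDP port, WebSocket). A minimal sketch of that pattern generalized into a helper — the discover import path, the []string type of the etcd endpoint list, and the CreateKeepAlive/Start/Stop signatures are assumptions read off this diff, not verified against the package:

```go
// Sketch only — generalizes the keepalive registration pattern in this hunk.
package server

// Import path assumed; the diff only shows the package name "discover".
import "github.com/goodrain/rainbond/pkg/eventlog/cluster/discover"

// registerEndpoint announces one listener (name, advertised IP, port) in etcd
// and returns a function that de-registers it when the server shuts down.
func registerEndpoint(etcd []string, name, ip string, port int) (func(), error) {
	ka, err := discover.CreateKeepAlive(etcd, name, ip, ip, port)
	if err != nil {
		return nil, err
	}
	if err := ka.Start(); err != nil {
		return nil, err
	}
	return func() { ka.Stop() }, nil
}
```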
@@ -114,7 +114,24 @@ type Conf struct {
	// default 300
	LockTTL int64

	Etcd             client.Config
	StatsdConfig     StatsdConfig
	UDPMonitorConfig UDPMonitorConfig
}

//StatsdConfig StatsdConfig
type StatsdConfig struct {
	StatsdListenAddress string
	StatsdListenUDP     string
	StatsdListenTCP     string
	MappingConfig       string
	ReadBuffer          int
}

//UDPMonitorConfig UDPMonitorConfig
type UDPMonitorConfig struct {
	ListenHost string
	ListenPort string
}

//AddFlags AddFlags
@@ -153,6 +170,11 @@ func (a *Conf) AddFlags(fs *pflag.FlagSet) {
	//fs.StringSliceVar(&a.EventServerAddress, "event-servers", []string{"http://127.0.0.1:6363"}, "event message server address.")
	fs.StringVar(&a.DBType, "db-type", "mysql", "db type mysql or etcd")
	fs.StringVar(&a.DBConnectionInfo, "mysql", "admin:admin@tcp(127.0.0.1:3306)/region", "mysql db connection info")
	fs.StringVar(&a.StatsdConfig.StatsdListenAddress, "statsd.listen-address", "", "The UDP address on which to receive statsd metric lines. DEPRECATED, use statsd.listen-udp instead.")
	fs.StringVar(&a.StatsdConfig.StatsdListenUDP, "statsd.listen-udp", ":9125", "The UDP address on which to receive statsd metric lines. \"\" disables it.")
	fs.StringVar(&a.StatsdConfig.StatsdListenTCP, "statsd.listen-tcp", ":9125", "The TCP address on which to receive statsd metric lines. \"\" disables it.")
	fs.StringVar(&a.StatsdConfig.MappingConfig, "statsd.mapping-config", "", "Metric mapping configuration file name.")
	fs.IntVar(&a.StatsdConfig.ReadBuffer, "statsd.read-buffer", 0, "Size (in bytes) of the operating system's transmit read buffer associated with the UDP connection. Please make sure the kernel parameters net.core.rmem_max is set to a value greater than the value specified.")
}

//SetLog sets up the logger
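The statsd.* options above are registered with pflag like the node's existing flags. A self-contained sketch of that wiring, assuming only the field names introduced in this hunk (the real Conf carries many more fields and is parsed by the node entrypoint, not a main like this):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// StatsdConfig mirrors (a subset of) the struct added in this hunk.
type StatsdConfig struct {
	StatsdListenUDP string
	MappingConfig   string
	ReadBuffer      int
}

func main() {
	var c StatsdConfig
	fs := pflag.NewFlagSet("node", pflag.ExitOnError)
	fs.StringVar(&c.StatsdListenUDP, "statsd.listen-udp", ":9125", "UDP address for statsd metric lines")
	fs.StringVar(&c.MappingConfig, "statsd.mapping-config", "", "metric mapping configuration file")
	fs.IntVar(&c.ReadBuffer, "statsd.read-buffer", 0, "OS read buffer size for the UDP connection")
	// Example invocation, as in the Makefile change above.
	fs.Parse([]string{"--statsd.mapping-config=test/mapper.yml"})
	fmt.Printf("%+v\n", c)
}
```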
@@ -20,28 +20,33 @@ package server

import (
	"fmt"
	"k8s.io/client-go/pkg/api/v1"

	"github.com/goodrain/rainbond/cmd/node/option"
	"github.com/goodrain/rainbond/pkg/node/api/controller"
	"github.com/goodrain/rainbond/pkg/node/core/job"
	"github.com/goodrain/rainbond/pkg/node/core/k8s"
	"github.com/goodrain/rainbond/pkg/node/core/store"
	"github.com/goodrain/rainbond/pkg/node/masterserver"
	"github.com/goodrain/rainbond/pkg/node/monitormessage"
	"github.com/goodrain/rainbond/pkg/node/nodeserver"
	"github.com/goodrain/rainbond/pkg/node/statsd"
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/pkg/api/v1"

	"github.com/Sirupsen/logrus"

	eventLog "github.com/goodrain/rainbond/pkg/event"

	"github.com/goodrain/rainbond/pkg/node/api"
	"github.com/goodrain/rainbond/pkg/node/event"
	"bytes"
	"os/exec"
	"encoding/json"
	"io/ioutil"
	"net/http"
	"os/exec"
	"strconv"
	"strings"
	"net/http"
	"io/ioutil"

	"github.com/goodrain/rainbond/pkg/node/api"
	"github.com/goodrain/rainbond/pkg/node/event"
)
//Run start run
@@ -53,6 +58,7 @@ func Run(c *option.Conf) error {
	})
	if err != nil {
		logrus.Errorf("error creating eventlog manager")
		return nil
	}
	defer eventLog.CloseManager()

@@ -87,7 +93,7 @@ func Run(c *option.Conf) error {
		logrus.Errorf(err.Error())
		return err
	}
	if !s.HostNode.Role.HasRule("compute"){
	if !s.HostNode.Role.HasRule("compute") {
		getInfoForMaster(s)
	}
	ms.Cluster.UpdateNode(s.HostNode)
@@ -97,8 +103,19 @@ func Run(c *option.Conf) error {
		}
		event.On(event.EXIT, ms.Stop)
	}
	//statsd exporter
	registry := prometheus.NewRegistry()
	exporter := statsd.CreateExporter(c.StatsdConfig, registry)
	if err := exporter.Start(); err != nil {
		logrus.Errorf("start statsd exporter server error,%s", err.Error())
		return err
	}
	meserver := monitormessage.CreateUDPServer("0.0.0.0", 6666)
	if err := meserver.Start(); err != nil {
		return err
	}
	//start the API service
	apiManager := api.NewManager(*s.Conf, s.HostNode, ms)
	apiManager := api.NewManager(*s.Conf, s.HostNode, ms, exporter)
	if err := apiManager.Start(errChan); err != nil {
		return err
	}
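With this change the node also listens for monitor messages on UDP port 6666 (monitormessage.CreateUDPServer above). A minimal client sketch, assuming only the address from this hunk — the actual message format is defined elsewhere in Rainbond and the payload below is a placeholder:

```go
package main

import (
	"fmt"
	"net"
)

// Send one datagram to the node's monitor-message UDP listener.
func main() {
	conn, err := net.Dial("udp", "127.0.0.1:6666")
	if err != nil {
		fmt.Println("dial udp:", err)
		return
	}
	defer conn.Close()
	// Placeholder payload; the real format is defined by the monitormessage server.
	if _, err := conn.Write([]byte("example-monitor-message")); err != nil {
		fmt.Println("write udp:", err)
	}
}
```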
@ -127,53 +144,53 @@ func getInfoForMaster(s *nodeserver.NodeServer) {
|
||||
logrus.Errorf("error get response from sysinfo script,details %s", err.Error())
|
||||
return
|
||||
}
|
||||
cmd := exec.Command("bash","-c", string(b))
|
||||
cmd := exec.Command("bash", "-c", string(b))
|
||||
|
||||
//cmd := exec.Command("bash", "/usr/share/gr-rainbond-node/gaops/jobs/install/manage/tasks/ex_domain.sh")
|
||||
outbuf:=bytes.NewBuffer(nil)
|
||||
cmd.Stderr=outbuf
|
||||
err=cmd.Run()
|
||||
outbuf := bytes.NewBuffer(nil)
|
||||
cmd.Stderr = outbuf
|
||||
err = cmd.Run()
|
||||
if err != nil {
|
||||
logrus.Infof("err run command ,details %s",err.Error())
|
||||
logrus.Infof("err run command ,details %s", err.Error())
|
||||
return
|
||||
}
|
||||
result:=make(map[string]string)
|
||||
result := make(map[string]string)
|
||||
|
||||
out:=outbuf.Bytes()
|
||||
logrus.Infof("get system info is %s ",string(out))
|
||||
err=json.Unmarshal(out,&result)
|
||||
out := outbuf.Bytes()
|
||||
logrus.Infof("get system info is %s ", string(out))
|
||||
err = json.Unmarshal(out, &result)
|
||||
if err != nil {
|
||||
logrus.Infof("err unmarshal shell output ,details %s",err.Error())
|
||||
logrus.Infof("err unmarshal shell output ,details %s", err.Error())
|
||||
return
|
||||
}
|
||||
s.HostNode.NodeStatus=&v1.NodeStatus{
|
||||
NodeInfo:v1.NodeSystemInfo{
|
||||
KernelVersion:result["KERNEL"],
|
||||
Architecture:result["PLATFORM"],
|
||||
OperatingSystem:result["OS"],
|
||||
KubeletVersion:"N/A",
|
||||
s.HostNode.NodeStatus = &v1.NodeStatus{
|
||||
NodeInfo: v1.NodeSystemInfo{
|
||||
KernelVersion: result["KERNEL"],
|
||||
Architecture: result["PLATFORM"],
|
||||
OperatingSystem: result["OS"],
|
||||
KubeletVersion: "N/A",
|
||||
},
|
||||
}
|
||||
if cpuStr,ok:=result["LOGIC_CORES"];ok{
|
||||
if cpu,err:=strconv.Atoi(cpuStr);err==nil{
|
||||
logrus.Infof("server cpu is %v",cpu)
|
||||
s.HostNode.AvailableCPU=int64(cpu)
|
||||
if cpuStr, ok := result["LOGIC_CORES"]; ok {
|
||||
if cpu, err := strconv.Atoi(cpuStr); err == nil {
|
||||
logrus.Infof("server cpu is %v", cpu)
|
||||
s.HostNode.AvailableCPU = int64(cpu)
|
||||
s.HostNode.NodeStatus.Allocatable.Cpu().Set(int64(cpu))
|
||||
}
|
||||
}
|
||||
|
||||
if memStr,ok:=result["MEMORY"];ok{
|
||||
memStr=strings.Replace(memStr," ","",-1)
|
||||
memStr=strings.Replace(memStr,"G","",-1)
|
||||
memStr=strings.Replace(memStr,"B","",-1)
|
||||
if mem,err:=strconv.ParseFloat(memStr,64);err==nil{
|
||||
s.HostNode.AvailableMemory=int64(mem*1024*1024*1024)
|
||||
s.HostNode.NodeStatus.Allocatable.Memory().SetScaled(int64(mem*1024*1024*1024),0)
|
||||
}else {
|
||||
logrus.Warnf("get master memory info failed ,details %s",err.Error())
|
||||
if memStr, ok := result["MEMORY"]; ok {
|
||||
memStr = strings.Replace(memStr, " ", "", -1)
|
||||
memStr = strings.Replace(memStr, "G", "", -1)
|
||||
memStr = strings.Replace(memStr, "B", "", -1)
|
||||
if mem, err := strconv.ParseFloat(memStr, 64); err == nil {
|
||||
s.HostNode.AvailableMemory = int64(mem * 1024 * 1024 * 1024)
|
||||
s.HostNode.NodeStatus.Allocatable.Memory().SetScaled(int64(mem*1024*1024*1024), 0)
|
||||
} else {
|
||||
logrus.Warnf("get master memory info failed ,details %s", err.Error())
|
||||
}
|
||||
}
|
||||
logrus.Infof("memory is %v",s.HostNode.AvailableMemory)
|
||||
logrus.Infof("memory is %v", s.HostNode.AvailableMemory)
|
||||
s.Update()
|
||||
|
||||
}
|
||||
|
@@ -2086,6 +2086,38 @@
        }
      }
    },
    "/v2/tenants/{tenant_name}/protocols": {
      "get": {
        "description": "get region protocols",
        "produces": [
          "application/json",
          "application/xml"
        ],
        "tags": [
          "v2"
        ],
        "summary": "获取当前数据中心支持的protocols",
        "operationId": "getSupportProtocols",
        "parameters": [
          {
            "type": "string",
            "x-go-name": "TenantName",
            "description": "tenant name",
            "name": "tenant_name",
            "in": "path",
            "required": true
          }
        ],
        "responses": {
          "default": {
            "description": "统一返回格式",
            "schema": {
              "$ref": "#/responses/commandResponse"
            }
          }
        }
      }
    },
    "/v2/tenants/{tenant_name}/res": {
      "get": {
        "description": "get tenant resources",
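The new path is a plain GET with the tenant name in the URL and the generic commandResponse envelope shown above. A hedged client sketch — the region API host/port and tenant name are placeholders, and any required auth token header is omitted:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// Call the new protocols endpoint and print the raw response body.
func main() {
	// Host, port, and tenant name are placeholders for illustration only.
	resp, err := http.Get("http://127.0.0.1:8888/v2/tenants/example-tenant/protocols")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```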
@ -10,8 +10,7 @@ ENV PKG_URL="http://goodrain-pkg.oss-cn-shanghai.aliyuncs.com/pkg"
|
||||
RUN curl $PKG_URL/labor_docker.tar.gz | tar -xzC /usr/bin/ \
|
||||
&& curl $PKG_URL/labor_libzmq.tar.gz | tar -xzC /usr/local/ \
|
||||
&& adduser -u 200 -D -S rain \
|
||||
&& echo 'rain ALL = (root) NOPASSWD:/usr/bin/docker' > /etc/sudoers.d/rain \
|
||||
&& echo 'rain ALL = (root) NOPASSWD:/usr/bin/git' > /etc/sudoers.d/rain \
|
||||
&& echo 'rain ALL = (root) NOPASSWD: ALL' > /etc/sudoers.d/rain \
|
||||
&& curl https://bootstrap.pypa.io/get-pip.py | python -
|
||||
|
||||
ADD rainbond-chaos /run/rainbond-chaos
|
||||
|
@ -330,7 +330,7 @@ class RepoBuilder():
|
||||
h = self.user_cs_client
|
||||
try:
|
||||
h.update_service(self.service_id, json.dumps(update_items))
|
||||
self.region_client.update_service_region(self.service_id,json.dumps(update_items))
|
||||
self.region_client.update_service_region(self.service_id, json.dumps(update_items))
|
||||
except h.CallApiError, e:
|
||||
self.log.error(
|
||||
"网络异常,更新应用镜像名称失败. {}".format(e.message),
|
||||
|
@ -34,6 +34,7 @@ type TenantInterface interface {
|
||||
TenantsGetByName(w http.ResponseWriter, r *http.Request)
|
||||
SumTenants(w http.ResponseWriter, r *http.Request)
|
||||
SingleTenantResources(w http.ResponseWriter, r *http.Request)
|
||||
GetSupportProtocols(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
//ServiceInterface ServiceInterface
|
||||
|
@ -60,6 +60,7 @@ func (v2 *V2) tenantNameRouter() chi.Router {
|
||||
r.Delete("/", controller.GetManager().Tenant)
|
||||
//租户中的日志
|
||||
r.Post("/event-log", controller.GetManager().TenantLogByAction)
|
||||
r.Get("/protocols", controller.GetManager().GetSupportProtocols)
|
||||
//代码检测
|
||||
r.Post("/code-check", controller.GetManager().CheckCode)
|
||||
r.Post("/cloud-share", controller.GetManager().ShareCloud)
|
||||
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -31,6 +30,7 @@ func Routes() chi.Router {
|
||||
r.Get("/docker_console", controller.GetDockerConsole().Get)
|
||||
r.Get("/docker_log", controller.GetDockerLog().Get)
|
||||
r.Get("/monitor_message", controller.GetMonitorMessage().Get)
|
||||
r.Get("/new_monitor_message", controller.GetMonitorMessage().Get)
|
||||
r.Get("/event_log", controller.GetEventLog().Get)
|
||||
return r
|
||||
}
|
||||
|
@ -1475,29 +1475,27 @@ func (t *TenantStruct) Probe(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
//AddProbe add probe
|
||||
// swagger:operation POST /v2/tenants/{tenant_name}/services/{service_alias}/probe v2 addProbe
|
||||
//
|
||||
// 增加应用探针
|
||||
//
|
||||
// add probe
|
||||
//
|
||||
// ---
|
||||
// consumes:
|
||||
// - application/json
|
||||
// - application/x-protobuf
|
||||
//
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
func (t *TenantStruct) AddProbe(w http.ResponseWriter, r *http.Request) {
|
||||
// swagger:operation POST /v2/tenants/{tenant_name}/services/{service_alias}/probe v2 addProbe
|
||||
//
|
||||
// 增加应用探针
|
||||
//
|
||||
// add probe
|
||||
//
|
||||
// ---
|
||||
// consumes:
|
||||
// - application/json
|
||||
// - application/x-protobuf
|
||||
//
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
|
||||
logrus.Debugf("trans add probe dependency service ")
|
||||
serviceID := r.Context().Value(middleware.ContextKey("service_id")).(string)
|
||||
var tsp api_model.ServiceProbe
|
||||
if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &tsp, nil); !ok {
|
||||
@ -1526,29 +1524,27 @@ func (t *TenantStruct) AddProbe(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
//UpdateProbe update probe
|
||||
// swagger:operation PUT /v2/tenants/{tenant_name}/services/{service_alias}/probe v2 updateProbe
|
||||
//
|
||||
// 更新应用探针信息, *注意此处为全量更新
|
||||
//
|
||||
// update probe
|
||||
//
|
||||
// ---
|
||||
// consumes:
|
||||
// - application/json
|
||||
// - application/x-protobuf
|
||||
//
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
func (t *TenantStruct) UpdateProbe(w http.ResponseWriter, r *http.Request) {
|
||||
// swagger:operation PUT /v2/tenants/{tenant_name}/services/{service_alias}/probe v2 updateProbe
|
||||
//
|
||||
// 更新应用探针信息, *注意此处为全量更新
|
||||
//
|
||||
// update probe
|
||||
//
|
||||
// ---
|
||||
// consumes:
|
||||
// - application/json
|
||||
// - application/x-protobuf
|
||||
//
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
|
||||
logrus.Debugf("trans update probe dependency service ")
|
||||
serviceID := r.Context().Value(middleware.ContextKey("service_id")).(string)
|
||||
var tsp api_model.ServiceProbe
|
||||
if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &tsp, nil); !ok {
|
||||
@ -1582,29 +1578,27 @@ func (t *TenantStruct) UpdateProbe(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
//DeleteProbe delete probe
|
||||
// swagger:operation DELETE /v2/tenants/{tenant_name}/services/{service_alias}/probe v2 deleteProbe
|
||||
//
|
||||
// 删除应用探针
|
||||
//
|
||||
// delete probe
|
||||
//
|
||||
// ---
|
||||
// consumes:
|
||||
// - application/json
|
||||
// - application/x-protobuf
|
||||
//
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
func (t *TenantStruct) DeleteProbe(w http.ResponseWriter, r *http.Request) {
|
||||
// swagger:operation DELETE /v2/tenants/{tenant_name}/services/{service_alias}/probe v2 deleteProbe
|
||||
//
|
||||
// 删除应用探针
|
||||
//
|
||||
// delete probe
|
||||
//
|
||||
// ---
|
||||
// consumes:
|
||||
// - application/json
|
||||
// - application/x-protobuf
|
||||
//
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
|
||||
logrus.Debugf("trans delete probe dependency service ")
|
||||
serviceID := r.Context().Value(middleware.ContextKey("service_id")).(string)
|
||||
var tsp api_model.ServiceProbe
|
||||
if ok := httputil.ValidatorRequestStructAndErrorResponse(r, w, &tsp, nil); !ok {
|
||||
@ -1649,30 +1643,29 @@ func (t *TenantStruct) UpdatePort(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
//SingleTenantResources SingleTenantResources
|
||||
// swagger:operation GET /v2/tenants/{tenant_name}/resources v2 singletenantResources
|
||||
//
|
||||
// 指定租户资源使用情况
|
||||
//
|
||||
// get tenant resources
|
||||
//
|
||||
// ---
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
// parameters:
|
||||
// - name: tenant_name
|
||||
// in: path
|
||||
// description: tenant name
|
||||
// required: true
|
||||
// type: string
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
func (t *TenantStruct) SingleTenantResources(w http.ResponseWriter, r *http.Request) {
|
||||
// swagger:operation GET /v2/tenants/{tenant_name}/resources v2 singletenantResources
|
||||
//
|
||||
// 指定租户资源使用情况
|
||||
//
|
||||
// get tenant resources
|
||||
//
|
||||
// ---
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
// parameters:
|
||||
// - name: tenant_name
|
||||
// in: path
|
||||
// description: tenant name
|
||||
// required: true
|
||||
// type: string
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
|
||||
tenantID := r.Context().Value(middleware.ContextKey("tenant_id")).(string)
|
||||
//11ms
|
||||
services, err := handler.GetServiceManager().GetService(tenantID)
|
||||
@ -1690,3 +1683,36 @@ func (t *TenantStruct) SingleTenantResources(w http.ResponseWriter, r *http.Requ
|
||||
httputil.ReturnSuccess(r, w, statsInfo)
|
||||
return
|
||||
}
|
||||
|
||||
//GetSupportProtocols GetSupportProtocols
|
||||
// swagger:operation GET /v2/tenants/{tenant_name}/protocols v2 getSupportProtocols
|
||||
//
|
||||
// 获取当前数据中心支持的protocols
|
||||
//
|
||||
// get region protocols
|
||||
//
|
||||
// ---
|
||||
// produces:
|
||||
// - application/json
|
||||
// - application/xml
|
||||
// parameters:
|
||||
// - name: tenant_name
|
||||
// in: path
|
||||
// description: tenant name
|
||||
// required: true
|
||||
// type: string
|
||||
//
|
||||
// responses:
|
||||
// default:
|
||||
// schema:
|
||||
// "$ref": "#/responses/commandResponse"
|
||||
// description: 统一返回格式
|
||||
func (t *TenantStruct) GetSupportProtocols(w http.ResponseWriter, r *http.Request) {
|
||||
rps, err := handler.GetTenantManager().GetProtocols()
|
||||
if err != nil {
|
||||
err.Handle(r, w)
|
||||
return
|
||||
}
|
||||
httputil.ReturnSuccess(r, w, rps)
|
||||
return
|
||||
}
|
||||
|
@ -1,4 +1,3 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
@ -22,13 +21,14 @@ package handler
|
||||
import (
|
||||
"github.com/goodrain/rainbond/cmd/api/option"
|
||||
api_model "github.com/goodrain/rainbond/pkg/api/model"
|
||||
"github.com/goodrain/rainbond/pkg/api/util"
|
||||
dbmodel "github.com/goodrain/rainbond/pkg/db/model"
|
||||
)
|
||||
|
||||
//TenantHandler tenant handler
|
||||
type TenantHandler interface {
|
||||
GetTenants() ([]*dbmodel.Tenants, error)
|
||||
GetTenantsPaged(offset,len int) ([]*dbmodel.Tenants, error)
|
||||
GetTenantsPaged(offset, len int) ([]*dbmodel.Tenants, error)
|
||||
GetTenantsByName(name string) (*dbmodel.Tenants, error)
|
||||
GetTenantsByUUID(uuid string) (*dbmodel.Tenants, error)
|
||||
GetTenantsName() ([]string, error)
|
||||
@ -36,8 +36,9 @@ type TenantHandler interface {
|
||||
TotalMemCPU(services []*dbmodel.TenantServices) (*api_model.StatsInfo, error)
|
||||
//QueryTsdb(md *api_model.MontiorData) (*tsdbClient.QueryResponse, error)
|
||||
HTTPTsdb(md *api_model.MontiorData) ([]byte, error)
|
||||
GetTenantsResources(tr *api_model.TenantResources) ([]*map[string]interface{}, error)
|
||||
GetTenantsResources(tr *api_model.TenantResources) ([]map[string]interface{}, error)
|
||||
TenantsSum() (int, error)
|
||||
GetProtocols() ([]*dbmodel.RegionProcotols, *util.APIHandleError)
|
||||
}
|
||||
|
||||
var defaultTenantHandler TenantHandler
|
||||
|
@ -94,6 +94,7 @@ CREATE:
|
||||
rui := &dbmodel.RegionUserInfo{
|
||||
EID: gt.Body.EID,
|
||||
RegionTag: c.RegionTag,
|
||||
APIRange: gt.Body.Range,
|
||||
ValidityPeriod: gt.Body.ValidityPeriod,
|
||||
Token: token,
|
||||
}
|
||||
|
@ -19,6 +19,7 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"github.com/jinzhu/gorm"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
@ -255,31 +256,63 @@ func (p *PluginAction) ImageBuildPlugin(b *api_model.BuildPluginStruct, plugin *
|
||||
//TODO: build_version create in console
|
||||
//diffStr := fmt.Sprintf("%s%s%s%s", b.TenantName, plugin.ImageURL, b.PluginID, time.Now().Format(time.RFC3339))
|
||||
//buildVersion := createVersionID([]byte(diffStr))
|
||||
pbv := &dbmodel.TenantPluginBuildVersion{
|
||||
VersionID: b.Body.BuildVersion,
|
||||
PluginID: b.PluginID,
|
||||
Kind: plugin.BuildModel,
|
||||
BaseImage: plugin.ImageURL,
|
||||
ContainerCPU: b.Body.PluginCPU,
|
||||
ContainerMemory: b.Body.PluginMemory,
|
||||
ContainerCMD: b.Body.PluginCMD,
|
||||
BuildTime: time.Now().Format(time.RFC3339),
|
||||
Info: b.Body.Info,
|
||||
Status: "building",
|
||||
}
|
||||
if b.Body.PluginCPU == 0 {
|
||||
pbv.ContainerCPU = 125
|
||||
}
|
||||
if b.Body.PluginMemory == 0 {
|
||||
pbv.ContainerMemory = 50
|
||||
}
|
||||
tx := db.GetManager().Begin()
|
||||
if err := db.GetManager().TenantPluginBuildVersionDaoTransactions(tx).AddModel(pbv); err != nil {
|
||||
if !strings.Contains(err.Error(), "exist") {
|
||||
tx.Rollback()
|
||||
logrus.Errorf("build plugin error: %s", err.Error())
|
||||
rebuild := false
|
||||
tpbv, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByVersionID(
|
||||
b.PluginID, b.Body.BuildVersion)
|
||||
if err != nil {
|
||||
if err.Error() == gorm.ErrRecordNotFound.Error(){
|
||||
rebuild = false
|
||||
}else{
|
||||
return nil, err
|
||||
}
|
||||
}else{
|
||||
rebuild = true
|
||||
}
|
||||
tx := db.GetManager().Begin()
|
||||
if rebuild {
|
||||
tpbv.Info = b.Body.Info
|
||||
tpbv.Status = "building"
|
||||
tpbv.BuildTime = time.Now().Format(time.RFC3339)
|
||||
if b.Body.PluginCPU == 0 {
|
||||
tpbv.ContainerCPU = 125
|
||||
}
|
||||
if b.Body.PluginMemory == 0 {
|
||||
tpbv.ContainerMemory = 50
|
||||
}
|
||||
if err := db.GetManager().TenantPluginBuildVersionDaoTransactions(tx).UpdateModel(tpbv); err != nil {
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
logrus.Errorf("build plugin error: %s", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}else {
|
||||
pbv := &dbmodel.TenantPluginBuildVersion{
|
||||
VersionID: b.Body.BuildVersion,
|
||||
PluginID: b.PluginID,
|
||||
Kind: plugin.BuildModel,
|
||||
BaseImage: plugin.ImageURL,
|
||||
ContainerCPU: b.Body.PluginCPU,
|
||||
ContainerMemory: b.Body.PluginMemory,
|
||||
ContainerCMD: b.Body.PluginCMD,
|
||||
BuildTime: time.Now().Format(time.RFC3339),
|
||||
Info: b.Body.Info,
|
||||
Status: "building",
|
||||
}
|
||||
if b.Body.PluginCPU == 0 {
|
||||
pbv.ContainerCPU = 125
|
||||
}
|
||||
if b.Body.PluginMemory == 0 {
|
||||
pbv.ContainerMemory = 50
|
||||
}
|
||||
if err := db.GetManager().TenantPluginBuildVersionDaoTransactions(tx).AddModel(pbv); err != nil {
|
||||
if !strings.Contains(err.Error(), "exist") {
|
||||
tx.Rollback()
|
||||
logrus.Errorf("build plugin error: %s", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
tpbv = pbv
|
||||
}
|
||||
taskBody := &builder_model.BuildPluginTaskBody{
|
||||
TenantID: b.Body.TenantID,
|
||||
@ -321,7 +354,7 @@ func (p *PluginAction) ImageBuildPlugin(b *api_model.BuildPluginStruct, plugin *
|
||||
return nil, nil
|
||||
}
|
||||
logrus.Debugf("equeue mq build plugin from image success")
|
||||
return pbv, nil
|
||||
return tpbv, nil
|
||||
}
|
||||
|
||||
//DockerfileBuildPlugin DockerfileBuildPlugin
|
||||
@ -339,32 +372,64 @@ func (p *PluginAction) DockerfileBuildPlugin(b *api_model.BuildPluginStruct, plu
|
||||
// TODO: build_version create in console
|
||||
// diffStr := fmt.Sprintf("%s%s%s%s", b.TenantName, b.Body.RepoURL, b.PluginID, time.Now().Format(time.RFC3339))
|
||||
// buildVersion := createVersionID([]byte(diffStr))
|
||||
pbv := &dbmodel.TenantPluginBuildVersion{
|
||||
VersionID: b.Body.BuildVersion,
|
||||
PluginID: b.PluginID,
|
||||
Kind: plugin.BuildModel,
|
||||
Repo: b.Body.RepoURL,
|
||||
GitURL: plugin.GitURL,
|
||||
Info: b.Body.Info,
|
||||
ContainerCPU: b.Body.PluginCPU,
|
||||
ContainerMemory: b.Body.PluginMemory,
|
||||
ContainerCMD: b.Body.PluginCMD,
|
||||
BuildTime: time.Now().Format(time.RFC3339),
|
||||
Status: "building",
|
||||
}
|
||||
if b.Body.PluginCPU == 0 {
|
||||
pbv.ContainerCPU = 125
|
||||
}
|
||||
if b.Body.PluginMemory == 0 {
|
||||
pbv.ContainerMemory = 50
|
||||
}
|
||||
tx := db.GetManager().Begin()
|
||||
if err := db.GetManager().TenantPluginBuildVersionDaoTransactions(tx).AddModel(pbv); err != nil {
|
||||
if !strings.Contains(err.Error(), "exist") {
|
||||
tx.Rollback()
|
||||
logrus.Errorf("build plugin error: %s", err.Error())
|
||||
rebuild := false
|
||||
tpbv, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByVersionID(
|
||||
b.PluginID, b.Body.BuildVersion)
|
||||
if err != nil {
|
||||
if err.Error() == gorm.ErrRecordNotFound.Error(){
|
||||
rebuild = false
|
||||
}else{
|
||||
return nil, err
|
||||
}
|
||||
}else{
|
||||
rebuild = true
|
||||
}
|
||||
tx := db.GetManager().Begin()
|
||||
if rebuild {
|
||||
tpbv.Info = b.Body.Info
|
||||
tpbv.Status = "building"
|
||||
tpbv.BuildTime = time.Now().Format(time.RFC3339)
|
||||
if b.Body.PluginCPU == 0 {
|
||||
tpbv.ContainerCPU = 125
|
||||
}
|
||||
if b.Body.PluginMemory == 0 {
|
||||
tpbv.ContainerMemory = 50
|
||||
}
|
||||
if err := db.GetManager().TenantPluginBuildVersionDaoTransactions(tx).UpdateModel(tpbv); err != nil {
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
logrus.Errorf("build plugin error: %s", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}else{
|
||||
pbv := &dbmodel.TenantPluginBuildVersion{
|
||||
VersionID: b.Body.BuildVersion,
|
||||
PluginID: b.PluginID,
|
||||
Kind: plugin.BuildModel,
|
||||
Repo: b.Body.RepoURL,
|
||||
GitURL: plugin.GitURL,
|
||||
Info: b.Body.Info,
|
||||
ContainerCPU: b.Body.PluginCPU,
|
||||
ContainerMemory: b.Body.PluginMemory,
|
||||
ContainerCMD: b.Body.PluginCMD,
|
||||
BuildTime: time.Now().Format(time.RFC3339),
|
||||
Status: "building",
|
||||
}
|
||||
if b.Body.PluginCPU == 0 {
|
||||
pbv.ContainerCPU = 125
|
||||
}
|
||||
if b.Body.PluginMemory == 0 {
|
||||
pbv.ContainerMemory = 50
|
||||
}
|
||||
if err := db.GetManager().TenantPluginBuildVersionDaoTransactions(tx).AddModel(pbv); err != nil {
|
||||
if !strings.Contains(err.Error(), "exist") {
|
||||
tx.Rollback()
|
||||
logrus.Errorf("build plugin error: %s", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
tpbv = pbv
|
||||
}
|
||||
taskBody := &builder_model.BuildPluginTaskBody{
|
||||
TenantID: b.Body.TenantID,
|
||||
@ -407,7 +472,7 @@ func (p *PluginAction) DockerfileBuildPlugin(b *api_model.BuildPluginStruct, plu
|
||||
return nil, nil
|
||||
}
|
||||
logrus.Debugf("equeue mq build plugin from dockerfile success")
|
||||
return pbv, nil
|
||||
return tpbv, nil
|
||||
}
|
||||
|
||||
//GetAllPluginBuildVersions GetAllPluginBuildVersions
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
|
||||
"github.com/goodrain/rainbond/cmd/api/option"
|
||||
api_db "github.com/goodrain/rainbond/pkg/api/db"
|
||||
"github.com/goodrain/rainbond/pkg/api/util"
|
||||
"github.com/goodrain/rainbond/pkg/mq/api/grpc/pb"
|
||||
|
||||
api_model "github.com/goodrain/rainbond/pkg/api/model"
|
||||
@ -107,7 +108,6 @@ func (t *TenantAction) TotalMemCPU(services []*dbmodel.TenantServices) (*api_mod
|
||||
cpus := 0
|
||||
mem := 0
|
||||
for _, service := range services {
|
||||
|
||||
logrus.Debugf("service is %s, cpus is %v, mem is %v", service.ID, service.ContainerCPU, service.ContainerMemory)
|
||||
cpus += service.ContainerCPU
|
||||
mem += service.ContainerMemory
|
||||
@ -224,8 +224,9 @@ func (t *TenantAction) HTTPTsdb(md *api_model.MontiorData) ([]byte, error) {
|
||||
}
|
||||
|
||||
//GetTenantsResources GetTenantsResources
|
||||
func (t *TenantAction) GetTenantsResources(tr *api_model.TenantResources) ([]*map[string]interface{}, error) {
|
||||
func (t *TenantAction) GetTenantsResources(tr *api_model.TenantResources) ([]map[string]interface{}, error) {
|
||||
//返回全部资源
|
||||
//TODO: 应用关闭,硬盘存储资源仍会占用
|
||||
return db.GetManager().TenantServiceDao().GetCPUAndMEM(tr.Body.TenantName)
|
||||
}
|
||||
|
||||
@ -237,3 +238,12 @@ func (t *TenantAction) TenantsSum() (int, error) {
|
||||
}
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
//GetProtocols GetProtocols
|
||||
func (t *TenantAction) GetProtocols() ([]*dbmodel.RegionProcotols, *util.APIHandleError) {
|
||||
rps, err := db.GetManager().RegionProcotolsDao().GetAllSupportProtocol("v2")
|
||||
if err != nil {
|
||||
return nil, util.CreateAPIHandleErrorFromDBError("get all support protocols", err)
|
||||
}
|
||||
return rps, nil
|
||||
}
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/goodrain/rainbond/cmd/api/option"
|
||||
api_model "github.com/goodrain/rainbond/pkg/api/model"
|
||||
"github.com/goodrain/rainbond/pkg/api/util"
|
||||
@ -108,6 +109,7 @@ func (t *TokenIdenAction) DeleteAPIManager(am *api_model.APIManager) *util.APIHa
|
||||
//CheckToken CheckToken
|
||||
func (t *TokenIdenAction) CheckToken(token, uri string) bool {
|
||||
m := GetDefaultTokenMap()
|
||||
logrus.Debugf("default token map is %v", m)
|
||||
regionInfo, ok := m[token]
|
||||
if !ok {
|
||||
return false
|
||||
@ -124,24 +126,26 @@ func (t *TokenIdenAction) CheckToken(token, uri string) bool {
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
rc := false
|
||||
for _, urinfo := range smL {
|
||||
if strings.HasPrefix(uri, urinfo.Prefix) {
|
||||
return true
|
||||
rc = true
|
||||
}
|
||||
return false
|
||||
}
|
||||
return rc
|
||||
case dbmodel.NODEMANAGER:
|
||||
sm := GetDefaultSourceURI()
|
||||
smL, ok := sm[dbmodel.NODEMANAGER]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
rc := false
|
||||
for _, urinfo := range smL {
|
||||
if strings.HasPrefix(uri, urinfo.Prefix) {
|
||||
return true
|
||||
rc = true
|
||||
}
|
||||
return false
|
||||
}
|
||||
return rc
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -1227,3 +1227,11 @@ type TenantServiceVolume struct {
|
||||
VolumePath string `gorm:"column:volume_path" json:"volume_path" validate:"volume_path|required"`
|
||||
IsReadOnly bool `gorm:"column:is_read_only;default:false" json:"is_read_only" validate:"is_read_only|bool"`
|
||||
}
|
||||
|
||||
// GetSupportProtocols GetSupportProtocols
|
||||
// swagger:parameters getSupportProtocols
|
||||
type GetSupportProtocols struct {
|
||||
// in: path
|
||||
// required: true
|
||||
TenantName string `json:"tenant_name"`
|
||||
}
|
||||
|
@ -29,6 +29,7 @@ import (
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/api/apiRouters/doc"
|
||||
"github.com/goodrain/rainbond/pkg/api/apiRouters/license"
|
||||
"github.com/goodrain/rainbond/pkg/api/proxy"
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/api/apiRouters/cloud"
|
||||
"github.com/goodrain/rainbond/pkg/api/apiRouters/version2"
|
||||
@ -43,11 +44,12 @@ import (
|
||||
|
||||
//Manager apiserver
|
||||
type Manager struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
conf option.Config
|
||||
stopChan chan struct{}
|
||||
r *chi.Mux
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
conf option.Config
|
||||
stopChan chan struct{}
|
||||
r *chi.Mux
|
||||
prometheusProxy proxy.Proxy
|
||||
}
|
||||
|
||||
//NewManager newManager
|
||||
@ -73,6 +75,7 @@ func NewManager(c option.Config) *Manager {
|
||||
//simple api version
|
||||
r.Use(apimiddleware.APIVersion)
|
||||
r.Use(apimiddleware.Proxy)
|
||||
|
||||
return &Manager{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
@ -118,6 +121,10 @@ func (m *Manager) Run() {
|
||||
m.r.Mount("/license", license.Routes())
|
||||
//兼容老版docker
|
||||
m.r.Get("/v1/etcd/event-log/instances", m.EventLogInstance)
|
||||
|
||||
//prometheus单节点代理
|
||||
m.r.Get("/api/v1/query", m.PrometheusAPI)
|
||||
m.r.Get("/api/v1/query_range", m.PrometheusAPI)
|
||||
//开启对浏览器的websocket服务和文件服务
|
||||
go func() {
|
||||
websocketRouter := chi.NewRouter()
|
||||
@ -172,3 +179,11 @@ func (m *Manager) EventLogInstance(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(404)
|
||||
return
|
||||
}
|
||||
|
||||
//PrometheusAPI prometheus api 代理
|
||||
func (m *Manager) PrometheusAPI(w http.ResponseWriter, r *http.Request) {
|
||||
if m.prometheusProxy == nil {
|
||||
m.prometheusProxy = proxy.CreateProxy("prometheus", "http", []string{"127.0.0.1:9999"})
|
||||
}
|
||||
m.prometheusProxy.Proxy(w, r)
|
||||
}
|
||||
|
@ -54,13 +54,13 @@ func ShowExec(command string, params []string, logger ...event.Logger) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return err
|
||||
stderr, _ := cmd.StderrPipe()
|
||||
errC := cmd.Start()
|
||||
if errC != nil {
|
||||
logrus.Debugf(fmt.Sprintf("builder: %v", errC))
|
||||
logger[0].Error(fmt.Sprintf("builder:%v", errC), map[string]string{"step": "build-exector"})
|
||||
return errC
|
||||
}
|
||||
errReader := bufio.NewReader(stderr)
|
||||
|
||||
cmd.Start()
|
||||
reader := bufio.NewReader(stdout)
|
||||
go func() {
|
||||
for {
|
||||
@ -73,11 +73,25 @@ func ShowExec(command string, params []string, logger ...event.Logger) error {
|
||||
logger[0].Debug(fmt.Sprintf("builder:%v", line), map[string]string{"step": "build-exector"})
|
||||
}
|
||||
}()
|
||||
if err := cmd.Wait(); err != nil {
|
||||
errLine, _ := errReader.ReadString('\n')
|
||||
logrus.Errorf(fmt.Sprintf("builder error: %v", errLine))
|
||||
logger[0].Error(fmt.Sprintf("build Error: %v", errLine), map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
return err
|
||||
errW := cmd.Wait()
|
||||
if errW != nil {
|
||||
//bytesErr, errR := ioutil.ReadAll(stderr)
|
||||
//logrus.Debugf(fmt.Sprintf("builder error: %v", errR))
|
||||
//logrus.Debugf(fmt.Sprintf("builder error: %v", string(bytesErr)))
|
||||
//logger[0].Error(fmt.Sprintf("build Error: %v", string(bytesErr)), map[string]string{"step": "builder-exector"})
|
||||
//return errR
|
||||
go func() {
|
||||
readerr := bufio.NewReader(stderr)
|
||||
for {
|
||||
line, errL := readerr.ReadString('\n')
|
||||
if errL != nil || io.EOF == errL {
|
||||
break
|
||||
}
|
||||
logrus.Errorf(fmt.Sprintf("builder err: %v", line))
|
||||
logger[0].Error(fmt.Sprintf("builder err:%v", line), map[string]string{"step": "build-exector"})
|
||||
}
|
||||
}()
|
||||
return errW
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -19,8 +19,11 @@
|
||||
package exector
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -64,7 +67,7 @@ func (e *exectorManager) pluginDockerfileBuild(in []byte) {
|
||||
if err := db.GetManager().TenantPluginBuildVersionDao().UpdateModel(version); err != nil {
|
||||
logrus.Errorf("update version error, %v", err)
|
||||
}
|
||||
logger.Info("插件构建超时,修改插件状态失败", map[string]string{"step": "callback", "status": "failure"})
|
||||
logger.Error("插件构建超时,修改插件状态失败", map[string]string{"step": "callback", "status": "failure"})
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
@ -73,24 +76,21 @@ func (e *exectorManager) pluginDockerfileBuild(in []byte) {
|
||||
for retry := 0; retry < 3; retry++ {
|
||||
err := e.runD(&tb, config, logger)
|
||||
if err != nil {
|
||||
logrus.Errorf("exec plugin build from image error:%s", err.Error())
|
||||
if retry < 3 {
|
||||
logger.Info("dockerfile构建插件任务执行失败,开始重试", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
} else {
|
||||
version, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByVersionID(tb.PluginID, tb.VersionID)
|
||||
if err != nil {
|
||||
logrus.Errorf("get version error, %v", err)
|
||||
}
|
||||
version.Status = "failure"
|
||||
if err := db.GetManager().TenantPluginBuildVersionDao().UpdateModel(version); err != nil {
|
||||
logrus.Errorf("update version error, %v", err)
|
||||
}
|
||||
logger.Info("dockerfile构建插件任务执行失败", map[string]string{"step": "callback", "status": "failure"})
|
||||
}
|
||||
logrus.Errorf("exec plugin build from dockerfile error:%s", err.Error())
|
||||
logger.Info("dockerfile构建插件任务执行失败,开始重试", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
} else {
|
||||
break
|
||||
return
|
||||
}
|
||||
}
|
||||
version, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByVersionID(tb.PluginID, tb.VersionID)
|
||||
if err != nil {
|
||||
logrus.Errorf("get version error, %v", err)
|
||||
}
|
||||
version.Status = "failure"
|
||||
if err := db.GetManager().TenantPluginBuildVersionDao().UpdateModel(version); err != nil {
|
||||
logrus.Errorf("update version error, %v", err)
|
||||
}
|
||||
logger.Error("dockerfile构建插件任务执行失败", map[string]string{"step": "callback", "status": "failure"})
|
||||
}()
|
||||
}
|
||||
|
||||
@ -102,12 +102,12 @@ func (e *exectorManager) runD(t *model.BuildPluginTaskBody, c parseConfig.Config
|
||||
t.Repo = "master"
|
||||
}
|
||||
if err := clone(t.GitURL, sourceDir, logger, t.Repo); err != nil {
|
||||
logger.Info("拉取代码失败", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
logger.Error("拉取代码失败", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
logrus.Errorf("拉取代码失败,%v", err)
|
||||
return err
|
||||
}
|
||||
if !checkDockerfile(sourceDir) {
|
||||
logger.Info("代码未检测到dockerfile,暂不支持构建,任务即将退出", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
logger.Error("代码未检测到dockerfile,暂不支持构建,任务即将退出", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
logrus.Error("代码未检测到dockerfile")
|
||||
return fmt.Errorf("have no dockerfile")
|
||||
}
|
||||
@ -121,7 +121,7 @@ func (e *exectorManager) runD(t *model.BuildPluginTaskBody, c parseConfig.Config
|
||||
logger.Info(fmt.Sprintf("镜像编译完成,开始推送镜像,镜像名为 %s", curImage), map[string]string{"step": "build-exector"})
|
||||
|
||||
if err := push(curImage, logger); err != nil {
|
||||
logger.Info("推送镜像失败", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
logger.Error("推送镜像失败", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
logrus.Error("推送镜像失败")
|
||||
return err
|
||||
}
|
||||
@ -161,17 +161,68 @@ func clone(gitURL string, sourceDir string, logger event.Logger, repo string) er
|
||||
logrus.Debugf("file check error: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
logrus.Debugf("pull: %s", fmt.Sprintf("cd %s & sudo -P git pull", sourceDir))
|
||||
mm := []string{"-P", "cd", sourceDir, "&", "sudo", "-P", "git", "pull"}
|
||||
if err := ShowExec("sudo", mm, logger); err != nil {
|
||||
return err
|
||||
} else {
|
||||
logrus.Debugf("pull: %s", fmt.Sprintf("sudo -P git -C %s pull", sourceDir))
|
||||
mm := []string{"-C", sourceDir, "pull"}
|
||||
if err := ShowExec("git", mm, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkGitDir(sourceDir string, logger event.Logger) {
|
||||
|
||||
func gitclone(gitURL string, sourceDir string, logger event.Logger, repo string) error {
|
||||
path := fmt.Sprintf("%s/.git/config", sourceDir)
|
||||
_, err := os.Stat(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
logrus.Debugf("clone: %s", fmt.Sprintf("git clone -b %s %s %s", repo, gitURL, sourceDir))
|
||||
mm := []string{"-P", "git", "clone", "-b", repo, gitURL, sourceDir}
|
||||
cmd := exec.Command("sudo", mm...)
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
logrus.Errorf(fmt.Sprintf("builder err: %v", err))
|
||||
return err
|
||||
}
|
||||
errC := cmd.Start()
|
||||
if errC != nil {
|
||||
logrus.Debugf(fmt.Sprintf("builder: %v", errC))
|
||||
logger.Error(fmt.Sprintf("builder:%v", errC), map[string]string{"step": "build-exector"})
|
||||
return errC
|
||||
}
|
||||
reader := bufio.NewReader(stdout)
|
||||
go func() {
|
||||
for {
|
||||
line, errL := reader.ReadString('\n')
|
||||
if errL != nil || io.EOF == errL {
|
||||
break
|
||||
}
|
||||
//fmt.Print(line)
|
||||
logrus.Debugf(fmt.Sprintf("builder: %v", line))
|
||||
logger.Debug(fmt.Sprintf("builder:%v", line), map[string]string{"step": "build-exector"})
|
||||
}
|
||||
}()
|
||||
errW := cmd.Wait()
|
||||
logrus.Debugf("errw is %v", errW)
|
||||
if errW != nil {
|
||||
cierr := strings.Split(errW.Error(), "\n")
|
||||
if strings.Contains(errW.Error(), "Cloning into") && len(cierr) < 3 {
|
||||
logrus.Errorf(fmt.Sprintf("builder:%v", errW))
|
||||
logger.Error(fmt.Sprintf("builder:%v", errW), map[string]string{"step": "build-exector"})
|
||||
return errW
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
logrus.Debugf("file check error: %v", err)
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("pull: %s", fmt.Sprintf("sudo -P git -C %s pull", sourceDir))
|
||||
mm := []string{"-P", "git", "-C", sourceDir, "pull"}
|
||||
if err := ShowExec("sudo", mm, logger); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDockerfile(sourceDir string) bool {
|
||||
|
@ -92,23 +92,20 @@ func (e *exectorManager) pluginImageBuild(in []byte) {
|
||||
err := e.run(&tb, config, logger)
|
||||
if err != nil {
|
||||
logrus.Errorf("exec plugin build from image error:%s", err.Error())
|
||||
if retry < 3 {
|
||||
logger.Info("镜像构建插件任务执行失败,开始重试", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
} else {
|
||||
version, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByVersionID(tb.PluginID, tb.VersionID)
|
||||
if err != nil {
|
||||
logrus.Errorf("get version error, %v", err)
|
||||
}
|
||||
version.Status = "failure"
|
||||
if err := db.GetManager().TenantPluginBuildVersionDao().UpdateModel(version); err != nil {
|
||||
logrus.Errorf("update version error, %v", err)
|
||||
}
|
||||
logger.Info("镜像构建插件任务执行失败", map[string]string{"step": "callback", "status": "failure"})
|
||||
}
|
||||
logger.Info("镜像构建插件任务执行失败,开始重试", map[string]string{"step": "builder-exector", "status": "failure"})
|
||||
} else {
|
||||
break
|
||||
return
|
||||
}
|
||||
}
|
||||
version, err := db.GetManager().TenantPluginBuildVersionDao().GetBuildVersionByVersionID(tb.PluginID, tb.VersionID)
|
||||
if err != nil {
|
||||
logrus.Errorf("get version error, %v", err)
|
||||
}
|
||||
version.Status = "failure"
|
||||
if err := db.GetManager().TenantPluginBuildVersionDao().UpdateModel(version); err != nil {
|
||||
logrus.Errorf("update version error, %v", err)
|
||||
}
|
||||
logger.Info("镜像构建插件任务执行失败", map[string]string{"step": "callback", "status": "failure"})
|
||||
}()
|
||||
}
|
||||
|
||||
|
@ -37,7 +37,7 @@ type TenantDao interface {
|
||||
GetTenantByUUID(uuid string) (*model.Tenants, error)
|
||||
GetTenantIDByName(tenantName string) (*model.Tenants, error)
|
||||
GetALLTenants() ([]*model.Tenants, error)
|
||||
GetPagedTenants(offset,len int) ([]*model.Tenants, error)
|
||||
GetPagedTenants(offset, len int) ([]*model.Tenants, error)
|
||||
}
|
||||
|
||||
//LicenseDao LicenseDao
|
||||
@ -64,8 +64,8 @@ type TenantServiceDao interface {
|
||||
GetServicesByTenantID(tenantID string) ([]*model.TenantServices, error)
|
||||
GetServicesAllInfoByTenantID(tenantID string) ([]*model.TenantServices, error)
|
||||
DeleteServiceByServiceID(serviceID string) error
|
||||
GetCPUAndMEM(tenantName []string) ([]*map[string]interface{}, error)
|
||||
GetPagedTenantService(offset,len int) ([]map[string]interface{}, error)
|
||||
GetCPUAndMEM(tenantName []string) ([]map[string]interface{}, error)
|
||||
GetPagedTenantService(offset, len int) ([]map[string]interface{}, error)
|
||||
GetTenantServiceRes(uuid string) (map[string]interface{}, error)
|
||||
}
|
||||
|
||||
@ -323,3 +323,10 @@ type RegionAPIClassDao interface {
|
||||
GetPrefixesByClass(apiClass string) ([]*model.RegionAPIClass, error)
|
||||
DeletePrefixInClass(apiClass, prefix string) error
|
||||
}
|
||||
|
||||
//RegionProcotolsDao RegionProcotolsDao
|
||||
type RegionProcotolsDao interface {
|
||||
Dao
|
||||
GetAllSupportProtocol(version string) ([]*model.RegionProcotols, error)
|
||||
GetProtocolGroupByProtocolChild(version, protocolChild string) (*model.RegionProcotols, error)
|
||||
}
|
||||
|
@ -95,6 +95,8 @@ type Manager interface {
|
||||
|
||||
RegionAPIClassDao() dao.RegionAPIClassDao
|
||||
RegionAPIClassDaoTransactions(db *gorm.DB) dao.RegionAPIClassDao
|
||||
|
||||
RegionProcotolsDao() dao.RegionProcotolsDao
|
||||
}
|
||||
|
||||
var defaultManager Manager
|
||||
|
pkg/db/model/protocol.go (new file, 54 lines)
@ -0,0 +1,54 @@
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import "time"
|
||||
|
||||
//TableName 表名
|
||||
func (t *RegionProcotols) TableName() string {
|
||||
return "region_protocols"
|
||||
}
|
||||
|
||||
//RegionProcotols RegionProcotol
|
||||
type RegionProcotols struct {
|
||||
ID uint `gorm:"column:ID"`
|
||||
CreatedAt time.Time `gorm:"column:create_time"`
|
||||
ProtocolGroup string `gorm:"column:protocol_group;size:32;primary_key" json:"protocol_group"`
|
||||
ProtocolChild string `gorm:"column:protocol_child;size:32;primary_key" json:"protocol_child"`
|
||||
APIVersion string `gorm:"column:api_version;size:8" json:"api_version"`
|
||||
IsSupport bool `gorm:"column:is_support;default:false" json:"is_support"`
|
||||
}
|
||||
|
||||
//STREAMGROUP STREAMGROUP
|
||||
var STREAMGROUP = "stream"
|
||||
|
||||
//HTTPGROUP HTTPGROUP
|
||||
var HTTPGROUP = "http"
|
||||
|
||||
//MYSQLPROTOCOL MYSQLPROTOCOL
|
||||
var MYSQLPROTOCOL = "mysql"
|
||||
|
||||
//UDPPROTOCOL UDPPROTOCOL
|
||||
var UDPPROTOCOL = "udp"
|
||||
|
||||
//TCPPROTOCOL TCPPROTOCOL
|
||||
var TCPPROTOCOL = "tcp"
|
||||
|
||||
//V2VERSION region version
|
||||
var V2VERSION = "v2"
|
@ -19,14 +19,12 @@
|
||||
package dao
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/goodrain/rainbond/pkg/db/model"
|
||||
"github.com/jinzhu/gorm"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
//AddModel AddModel
|
||||
|
pkg/db/mysql/dao/protocol.go (new file, 78 lines)
@ -0,0 +1,78 @@
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dao
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/db/model"
|
||||
)
|
||||
|
||||
//RegionProcotolsDaoImpl RegionProcotolsDaoImpl
|
||||
type RegionProcotolsDaoImpl struct {
|
||||
DB *gorm.DB
|
||||
}
|
||||
|
||||
//AddModel 添加cloud信息
|
||||
func (t *RegionProcotolsDaoImpl) AddModel(mo model.Interface) error {
|
||||
info := mo.(*model.RegionProcotols)
|
||||
var oldInfo model.RegionProcotols
|
||||
if ok := t.DB.Where("protocol_group = ? and ProtocolChild = ?", info.ProtocolGroup, info.ProtocolChild).Find(&oldInfo).RecordNotFound(); ok {
|
||||
if err := t.DB.Create(info).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("prococol group %s or child %s is exist", info.ProtocolGroup, info.ProtocolChild)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//UpdateModel 更新cloud信息
|
||||
func (t *RegionProcotolsDaoImpl) UpdateModel(mo model.Interface) error {
|
||||
info := mo.(*model.RegionProcotols)
|
||||
if info.ID == 0 {
|
||||
return fmt.Errorf("region protocol id can not be empty when update ")
|
||||
}
|
||||
if err := t.DB.Save(info).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//GetAllSupportProtocol 获取当前数据中心支持的所有协议
|
||||
func (t *RegionProcotolsDaoImpl) GetAllSupportProtocol(version string) ([]*model.RegionProcotols, error) {
|
||||
var rpss []*model.RegionProcotols
|
||||
if err := t.DB.Where("api_version= ? and is_support = 1", version).Find(&rpss).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rpss, nil
|
||||
}
|
||||
|
||||
//GetProtocolGroupByProtocolChild 获取协议族名称
|
||||
func (t *RegionProcotolsDaoImpl) GetProtocolGroupByProtocolChild(
|
||||
version,
|
||||
protocolChild string) (*model.RegionProcotols, error) {
|
||||
var rps model.RegionProcotols
|
||||
if err := t.DB.Where("api_version=? and protocol_child = ?", version, protocolChild).Find(&rps).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &rps, nil
|
||||
}
|
@ -23,6 +23,8 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/db/model"
|
||||
|
||||
@ -135,7 +137,7 @@ func (t *TenantServicesDaoImpl) GetServiceByID(serviceID string) (*model.TenantS
|
||||
|
||||
|
||||
//GetCPUAndMEM GetCPUAndMEM
|
||||
func (t *TenantServicesDaoImpl) GetCPUAndMEM(tenantName []string) ([]*map[string]interface{}, error) {
|
||||
func (t *TenantServicesDaoImpl) GetCPUAndMEM(tenantName []string) ([]map[string]interface{}, error) {
|
||||
if len(tenantName) == 0 {
|
||||
rows, err := t.DB.Raw("select sum(container_cpu) as cpu,sum(container_memory * replicas) as memory from tenant_services where service_id in (select service_id from tenant_service_status where status != 'closed' && status != 'undeploy')").Rows()
|
||||
if err != nil {
|
||||
@ -147,14 +149,14 @@ func (t *TenantServicesDaoImpl) GetCPUAndMEM(tenantName []string) ([]*map[string
|
||||
for rows.Next() {
|
||||
rows.Scan(&cpu, &mem)
|
||||
}
|
||||
var rc []*map[string]interface{}
|
||||
var rc []map[string]interface{}
|
||||
res := make(map[string]interface{})
|
||||
res["cpu"] = cpu
|
||||
res["memory"] = mem
|
||||
rc = append(rc, &res)
|
||||
rc = append(rc, res)
|
||||
return rc, nil
|
||||
}
|
||||
var rc []*map[string]interface{}
|
||||
var rc []map[string]interface{}
|
||||
for _, tenant := range tenantName {
|
||||
rows, err := t.DB.Raw("select tenant_id, sum(container_cpu) as cpu, sum(container_memory * replicas) as memory from tenant_services where service_id in (select service_id from tenant_service_status where (status != 'closed' && status != 'undeploy') && service_id in (select service_id from tenant_services where domain = (?))) group by tenant_id", tenant).Rows()
|
||||
if err != nil {
|
||||
@ -170,8 +172,15 @@ func (t *TenantServicesDaoImpl) GetCPUAndMEM(tenantName []string) ([]*map[string
|
||||
res["cpu"] = cpu
|
||||
res["memory"] = mem
|
||||
res["tenant_id"] = id
|
||||
logrus.Infof("res is $v", res)
|
||||
rc = append(rc, &res)
|
||||
dirPath := fmt.Sprintf("/grdata/tenant/%s", id)
|
||||
cmd := []string{"-sh", dirPath}
|
||||
f, err := exec.Command("du", cmd...).Output()
|
||||
if err != nil {
|
||||
f = []byte("0 xxx")
|
||||
}
|
||||
st := strings.Split(string(f), "\t")[0]
|
||||
res["disk"] = st
|
||||
rc = append(rc, res)
|
||||
}
|
||||
}
|
||||
return rc, nil
|
||||
|
@ -439,3 +439,10 @@ func (m *Manager) RegionAPIClassDaoTransactions(db *gorm.DB) dao.RegionAPIClassD
|
||||
DB: db,
|
||||
}
|
||||
}
|
||||
|
||||
//RegionProcotolsDao RegionProcotolsDao
|
||||
func (m *Manager) RegionProcotolsDao() dao.RegionProcotolsDao {
|
||||
return &mysqldao.RegionProcotolsDaoImpl{
|
||||
DB: m.db,
|
||||
}
|
||||
}
|
||||
|
@ -129,6 +129,7 @@ func (m *Manager) RegisterTableModel() {
|
||||
m.models = append(m.models, &model.RegionUserInfo{})
|
||||
m.models = append(m.models, &model.TenantServicesStreamPluginPort{})
|
||||
m.models = append(m.models, &model.RegionAPIClass{})
|
||||
m.models = append(m.models, &model.RegionProcotols{})
|
||||
}
|
||||
|
||||
//CheckTable 检测表结构
|
||||
@ -161,6 +162,7 @@ func (m *Manager) patchTable() {
|
||||
// m.db.Exec("alter table tenant_services add replica_id varchar(32)")
|
||||
// m.db.Exec("alter table tenant_services add status int(11) default 0")
|
||||
// m.db.Exec("alter table tenant_services add node_label varchar(40)")
|
||||
//权限组
|
||||
var rac model.RegionAPIClass
|
||||
if err := m.db.Where("class_level=? and prefix=?", "server_source", "/v2/show").Find(&rac).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
@ -174,6 +176,19 @@ func (m *Manager) patchTable() {
|
||||
m.db.Exec("insert into region_api_class VALUES ('','','node_manager','/v2/taskgroups','','','')")
|
||||
m.db.Exec("insert into region_api_class VALUES ('','','node_manager','/v2/tasktemps','','','')")
|
||||
m.db.Exec("insert into region_api_class VALUES ('','','node_manager','/v2/configs','','','')")
|
||||
m.db.Exec("insert into region_api_class VALUES ('','','server_source','/v2/builder','','','')")
|
||||
}
|
||||
}
|
||||
|
||||
//协议族支持
|
||||
var rps model.RegionProcotols
|
||||
if err := m.db.Where("protocol_group=? and protocol_child=?", "http", "http").Find(&rps).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
m.db.Exec("insert into region_protocols VALUES ('','','http','http','v2',1)")
|
||||
m.db.Exec("insert into region_protocols VALUES ('','','stream','mysql','v2',1)")
|
||||
m.db.Exec("insert into region_protocols VALUES ('','','stream','udp','v2',1)")
|
||||
m.db.Exec("insert into region_protocols VALUES ('','','stream','tcp','v2',1)")
|
||||
m.db.Exec("insert into region_protocols VALUES ('','','http','grpc','v2',0)")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -103,6 +103,7 @@ func (k *KeepAlive) Start() error {
|
||||
}
|
||||
logrus.Warnf("%s lid[%x] keepAlive err: %s, try to reset...", k.Endpoint, k.LID, err.Error())
|
||||
k.LID = 0
|
||||
timer.Reset(duration)
|
||||
}()
|
||||
} else {
|
||||
if err := k.reg(); err != nil {
|
||||
|
@ -18,12 +18,25 @@
|
||||
|
||||
package nginx
|
||||
|
||||
//AddDomainS AddDomainS
|
||||
type AddDomainS struct {
|
||||
Domain string
|
||||
PoolName string
|
||||
NodeList []string
|
||||
Domain string
|
||||
HTTPS bool
|
||||
TransferHTTP bool
|
||||
CertificateName string
|
||||
PoolName string
|
||||
NodeList []string
|
||||
}
|
||||
|
||||
//SSLCert SSLCert
|
||||
type SSLCert struct {
|
||||
CertName string
|
||||
Key string
|
||||
CA string
|
||||
HTTPMethod HTTPMETHOD
|
||||
}
|
||||
|
||||
//AddPoolNodeS AddPoolNodeS
|
||||
type AddPoolNodeS struct {
|
||||
PoolName string
|
||||
NodeList []string
|
||||
@ -104,3 +117,11 @@ type MethodHTTPArgs struct {
|
||||
Url string
|
||||
Domain string
|
||||
}
|
||||
|
||||
type MethodHTTPSArgs struct {
|
||||
PoolName *PoolName
|
||||
UpStream []byte
|
||||
Method HTTPMETHOD
|
||||
URL string
|
||||
Domain string
|
||||
}
|
||||
|
@ -217,6 +217,9 @@ func (n *nginxAPI) AddRule(rules ...*object.RuleObject) error {
|
||||
for _, rule := range rules {
|
||||
ads.PoolName = rule.PoolName
|
||||
ads.Domain = rule.DomainName
|
||||
ads.TransferHTTP = rule.TransferHTTP
|
||||
ads.HTTPS = rule.HTTPS
|
||||
ads.CertificateName = rule.CertificateName
|
||||
nodes, err := n.ctx.Store.GetNodeByPool(ads.PoolName)
|
||||
if err != nil {
|
||||
return handleErr(append(errs, errors.New("Getnodebypool error")))
|
||||
@ -320,10 +323,28 @@ func (n *nginxAPI) GetPluginStatus() bool {
|
||||
}
|
||||
|
||||
func (n *nginxAPI) AddCertificate(cas ...*object.Certificate) error {
|
||||
return nil
|
||||
var errs []error
|
||||
for _, ca := range cas {
|
||||
ssl := SSLCert{
|
||||
CertName: ca.Name,
|
||||
Key: ca.PrivateKey,
|
||||
CA: ca.Certificate,
|
||||
HTTPMethod: MethodPOST,
|
||||
}
|
||||
errs = n.pHTTPSCert(&ssl, errs)
|
||||
}
|
||||
return handleErr(errs)
|
||||
}
|
||||
func (n *nginxAPI) DeleteCertificate(cas ...*object.Certificate) error {
|
||||
return nil
|
||||
var errs []error
|
||||
for _, ca := range cas {
|
||||
ssl := SSLCert{
|
||||
CertName: ca.Name,
|
||||
HTTPMethod: MethodDELETE,
|
||||
}
|
||||
errs = n.pHTTPSCert(&ssl, errs)
|
||||
}
|
||||
return handleErr(errs)
|
||||
}
|
||||
|
||||
type nginxAPI struct {
|
||||
@ -439,6 +460,21 @@ func (n *nginxAPI) addDomain(ads *AddDomainS) bool {
|
||||
}
|
||||
n.pHTTP(pha)
|
||||
if !bytes.HasPrefix([]byte(ads.Domain), []byte(fmt.Sprintf("%s.%s", p.Port, p.Servicename))) {
|
||||
if ads.HTTPS && ads.CertificateName != "" {
|
||||
httpsInfo := bytes.NewBuffer(nil)
|
||||
httpsInfo.WriteString(`https=https`)
|
||||
httpsInfo.WriteString(fmt.Sprintf(`&cert_name=%s&`, ads.CertificateName))
|
||||
httpsInfo.WriteString(string(upstream))
|
||||
logrus.Debugf("https info is %v", string(httpsInfo.Bytes()))
|
||||
pha.UpStream = httpsInfo.Bytes()
|
||||
} else if ads.TransferHTTP && ads.CertificateName != "" {
|
||||
httpsInfo := bytes.NewBuffer(nil)
|
||||
httpsInfo.WriteString(`https=tran_https`)
|
||||
httpsInfo.WriteString(fmt.Sprintf(`&cert_name=%s&`, ads.CertificateName))
|
||||
httpsInfo.WriteString(string(upstream))
|
||||
logrus.Debugf("trans https info is %v", string(httpsInfo.Bytes()))
|
||||
pha.UpStream = httpsInfo.Bytes()
|
||||
}
|
||||
n.pHTTPDomain(ads.Domain, pha)
|
||||
}
|
||||
return true
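The request body assembled here is plain key=value text with an https marker prefixed to the existing upstream fragment; purely for illustration (all values made up), the same payload can be reproduced in a few lines:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// upstream is a made-up fragment standing in for the bytes produced
	// earlier in addDomain; only the shape matters here.
	upstream := "pool_name=5000.demo.tenant&servers=10.0.0.2:5000;"
	body := bytes.NewBuffer(nil)
	body.WriteString(`https=https`)
	body.WriteString(fmt.Sprintf(`&cert_name=%s&`, "example.com"))
	body.WriteString(upstream)
	fmt.Println(body.String())
	// https=https&cert_name=example.com&pool_name=5000.demo.tenant&servers=10.0.0.2:5000;
}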
|
||||
@ -756,6 +792,29 @@ func (n *nginxAPI) pHTTPDomain(domain string, p *MethodHTTPArgs) {
|
||||
}
|
||||
}
|
||||
|
||||
func (n *nginxAPI) pHTTPSCert(ssl *SSLCert, errs []error) []error {
|
||||
for _, baseURL := range splitURL(n.ctx.Option["httpapi"]) {
|
||||
url := fmt.Sprintf("%s/ssl/cert/%s", baseURL, ssl.CertName)
|
||||
logrus.Debugf("phttps cert url is %s, method is %v", url, ssl.HTTPMethod)
|
||||
certInfo := bytes.NewBuffer(nil)
|
||||
certInfo.WriteString(fmt.Sprintf(`cert_name=%s`, ssl.CertName))
|
||||
if ssl.HTTPMethod == MethodPOST {
|
||||
transCA := strings.Replace(ssl.CA, "+", "%2B", -1)
|
||||
transKey := strings.Replace(ssl.Key, "+", "%2B", -1)
|
||||
certInfo.WriteString(fmt.Sprintf(`&ca=%s`, transCA))
|
||||
certInfo.WriteString(fmt.Sprintf(`&key=%s`, transKey))
|
||||
}
|
||||
logrus.Debugf("cert info is %v", string(certInfo.Bytes()))
|
||||
resp, err := n.urlPPAction(ssl.HTTPMethod, url, certInfo.Bytes())
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
logrus.Error(err)
|
||||
}
|
||||
logrus.Debug(resp)
|
||||
}
|
||||
return errs
|
||||
}
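The certificate body above is ordinary application/x-www-form-urlencoded data, with '+' escaped by hand so PEM blocks survive the transport; for reference only (not the code path used here), an equivalent payload built with net/url looks like this:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	form := url.Values{}
	form.Set("cert_name", "example.com")
	// Truncated, made-up PEM material for illustration only.
	form.Set("ca", "-----BEGIN CERTIFICATE-----\nMIIB+zCC...\n-----END CERTIFICATE-----")
	form.Set("key", "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIB...\n-----END RSA PRIVATE KEY-----")
	// Encode escapes '+' (and newlines), which is what the strings.Replace
	// calls above achieve manually.
	fmt.Println(form.Encode())
}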
|
||||
|
||||
func (n *nginxAPI) pUpStreamServer(p *MethodHTTPArgs) {
|
||||
for _, baseURL := range splitURL(n.ctx.Option["streamapi"]) {
|
||||
url := fmt.Sprintf("%s/upstream/server/%s/%s/%s", baseURL, p.PoolName.Port, p.PoolName.Servicename, p.PoolName.Tenantname)
|
||||
|
@ -101,7 +101,9 @@ func GetManager() Manager {
|
||||
|
||||
//CloseManager 关闭日志服务
|
||||
func CloseManager() {
|
||||
defaultManager.Close()
|
||||
if defaultManager != nil {
|
||||
defaultManager.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *manager) Start() error {
|
||||
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -63,9 +62,10 @@ type LogConf struct {
|
||||
}
|
||||
|
||||
type EntryConf struct {
|
||||
EventLogServer EventLogServerConf
|
||||
DockerLogServer DockerLogServerConf
|
||||
MonitorMessageServer MonitorMessageServerConf
|
||||
EventLogServer EventLogServerConf
|
||||
DockerLogServer DockerLogServerConf
|
||||
MonitorMessageServer MonitorMessageServerConf
|
||||
NewMonitorMessageServerConf NewMonitorMessageServerConf
|
||||
}
|
||||
|
||||
type EventLogServerConf struct {
|
||||
@ -126,3 +126,8 @@ type MonitorMessageServerConf struct {
|
||||
SubSubscribe string
|
||||
CacheMessageSize int
|
||||
}
|
||||
|
||||
type NewMonitorMessageServerConf struct {
|
||||
ListenerHost string
|
||||
ListenerPort int
|
||||
}
|
||||
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -44,6 +43,8 @@ const (
|
||||
EventMessage ClusterMessageType = "event_log"
|
||||
//ServiceMonitorMessage 业务监控数据消息
|
||||
ServiceMonitorMessage ClusterMessageType = "monitor_message"
|
||||
//ServiceNewMonitorMessage 新业务监控数据消息
|
||||
ServiceNewMonitorMessage ClusterMessageType = "new_monitor_message"
|
||||
//MonitorMessage 节点监控数据
|
||||
MonitorMessage ClusterMessageType = "monitor"
|
||||
)
|
||||
|
105
pkg/eventlog/entry/new_monitor_message_server.go
Normal file
@ -0,0 +1,105 @@
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package entry
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/conf"
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/store"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
//NMonitorMessageServer 新性能分析实时数据接受服务
|
||||
type NMonitorMessageServer struct {
|
||||
conf conf.NewMonitorMessageServerConf
|
||||
log *logrus.Entry
|
||||
cancel func()
|
||||
context context.Context
|
||||
storemanager store.Manager
|
||||
messageChan chan []byte
|
||||
listenErr chan error
|
||||
serverLock sync.Mutex
|
||||
stopReceiveMessage bool
|
||||
listener *net.UDPConn
|
||||
}
|
||||
|
||||
//NewNMonitorMessageServer 创建UDP服务端
|
||||
func NewNMonitorMessageServer(conf conf.NewMonitorMessageServerConf, log *logrus.Entry, storeManager store.Manager) (*NMonitorMessageServer, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s := &NMonitorMessageServer{
|
||||
conf: conf,
|
||||
log: log,
|
||||
cancel: cancel,
|
||||
context: ctx,
|
||||
storemanager: storeManager,
|
||||
listenErr: make(chan error),
|
||||
}
|
||||
listener, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP(conf.ListenerHost), Port: conf.ListenerPort})
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
log.Infof("UDP Server Listener: %s", listener.LocalAddr().String())
|
||||
s.listener = listener
|
||||
s.messageChan = s.storemanager.NewMonitorMessageChan()
|
||||
if s.messageChan == nil {
|
||||
return nil, errors.New("receive monitor message server can not get store message chan ")
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
//Serve 执行
|
||||
func (s *NMonitorMessageServer) Serve() {
|
||||
s.handleMessage()
|
||||
}
|
||||
|
||||
//Stop 停止
|
||||
func (s *NMonitorMessageServer) Stop() {
|
||||
s.cancel()
|
||||
s.log.Info("receive new monitor message server stop")
|
||||
}
|
||||
|
||||
func (s *NMonitorMessageServer) handleMessage() {
|
||||
buf := make([]byte, 65535)
|
||||
defer s.listener.Close()
|
||||
s.log.Infoln("start receive monitor message by udp")
|
||||
for {
|
||||
n, _, err := s.listener.ReadFromUDP(buf)
|
||||
if err != nil {
|
||||
logrus.Errorf("read new monitor message from udp error,%s", err.Error())
|
||||
time.Sleep(time.Second * 2)
|
||||
continue
|
||||
}
|
||||
s.messageChan <- buf[0:n]
|
||||
}
|
||||
}
|
||||
|
||||
//ListenError listen error chan
|
||||
func (s *NMonitorMessageServer) ListenError() chan error {
|
||||
return s.listenErr
|
||||
}
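A quick way to exercise this listener by hand is to push a JSON array of monitor messages over UDP; the payload shape follows the MonitorMessage model added in the store package, and the address is only an example of whatever ListenerHost/ListenerPort were configured to.

package main

import (
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("udp", "127.0.0.1:6166") // example listener address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// One service, one key; the field names mirror store.MonitorMessage.
	payload := `[{"ServiceID":"test","Port":"5000","MessageType":"http","Key":"/demo","CumulativeTime":0.2,"AverageTime":0.1,"MaxTime":0.15,"Count":2,"AbnormalCount":0}]`
	if _, err := conn.Write([]byte(payload)); err != nil {
		log.Fatal(err)
	}
}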
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -63,9 +62,15 @@ func (e *Entry) Start() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newmonitorServer, err := NewNMonitorMessageServer(e.conf.NewMonitorMessageServerConf, e.log.WithField("server", "NewMonitorMessage"), e.storeManager)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
supervisor.Add(eventServer)
|
||||
supervisor.Add(dockerServer)
|
||||
supervisor.Add(monitorServer)
|
||||
supervisor.Add(newmonitorServer)
|
||||
supervisor.ServeBackground()
|
||||
e.supervisor = supervisor
|
||||
return nil
|
||||
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -21,14 +20,15 @@ package web
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/cluster"
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/cluster/discover"
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/conf"
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/exit/monitor"
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/store"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
@ -323,6 +323,88 @@ func (s *SocketServer) pushMonitorMessage(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
func (s *SocketServer) pushNewMonitorMessage(w http.ResponseWriter, r *http.Request) {
|
||||
// if r.FormValue("host") == "" || r.FormValue("host") != s.cluster.GetInstanceID() {
|
||||
// w.WriteHeader(404)
|
||||
// return
|
||||
// }
|
||||
upgrader := websocket.Upgrader{
|
||||
ReadBufferSize: s.conf.ReadBufferSize,
|
||||
WriteBufferSize: s.conf.WriteBufferSize,
|
||||
EnableCompression: s.conf.EnableCompression,
|
||||
Error: func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
|
||||
},
|
||||
CheckOrigin: func(r *http.Request) bool {
|
||||
return true
|
||||
},
|
||||
}
|
||||
conn, err := upgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
s.log.Error("Create web socket conn error.", err.Error())
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
_, me, err := conn.ReadMessage()
|
||||
if err != nil {
|
||||
s.log.Error("Read tag key from first message error.", err.Error())
|
||||
return
|
||||
}
|
||||
info := strings.Split(string(me), "=")
|
||||
if len(info) != 2 {
|
||||
s.log.Error("Read tag key from first message error. The data format is not correct")
|
||||
return
|
||||
}
|
||||
ServiceID := info[1]
|
||||
if ServiceID == "" {
|
||||
s.log.Error("tag key can not be empty when get socket message")
|
||||
return
|
||||
}
|
||||
s.log.Infof("Begin push monitor message of service (%s)", ServiceID)
|
||||
SubID := uuid.NewV4().String()
|
||||
ch := s.storemanager.WebSocketMessageChan("newmonitor", ServiceID, SubID)
|
||||
if ch == nil {
|
||||
// w.Write([]byte("Real-time message does not exist."))
|
||||
// w.Header().Set("Status Code", "200")
|
||||
s.log.Error("get web socket message chan from storemanager error.")
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
s.log.Debug("Push new monitor message request closed")
|
||||
s.storemanager.RealseWebSocketMessageChan("newmonitor", ServiceID, SubID)
|
||||
}()
|
||||
stop := make(chan struct{})
|
||||
go s.reader(conn, stop)
|
||||
pingTicker := time.NewTicker(s.timeout * 8 / 10)
|
||||
defer pingTicker.Stop()
|
||||
for {
|
||||
select {
|
||||
case message, ok := <-ch:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if message != nil {
|
||||
s.log.Debugf("websocket push a new monitor message")
|
||||
conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
|
||||
err = conn.WriteMessage(websocket.TextMessage, message.MonitorData)
|
||||
if err != nil {
|
||||
s.log.Warn("Push message to client error.", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
case <-stop:
|
||||
return
|
||||
case <-s.context.Done():
|
||||
return
|
||||
case <-pingTicker.C:
|
||||
conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
|
||||
if err := conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
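A client of this endpoint dials /new_monitor_message, sends a single tag frame of the form key=value (the handler only uses the part after '='), and then reads the pushed JSON frames; a minimal sketch with gorilla/websocket, the address being a placeholder for the eventlog websocket port:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	conn, _, err := websocket.DefaultDialer.Dial("ws://127.0.0.1:6363/new_monitor_message", nil) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// The first frame selects the service whose monitor data we want.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("service_id=test")); err != nil {
		log.Fatal(err)
	}
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("monitor data: %s", msg)
	}
}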
|
||||
func (s *SocketServer) reader(ws *websocket.Conn, ch chan struct{}) {
|
||||
defer ws.Close()
|
||||
@ -350,6 +432,7 @@ func (s *SocketServer) listen() {
|
||||
http.HandleFunc("/event_log", s.pushEventMessage)
|
||||
http.HandleFunc("/docker_log", s.pushDockerLog)
|
||||
http.HandleFunc("/monitor_message", s.pushMonitorMessage)
|
||||
http.HandleFunc("/new_monitor_message", s.pushNewMonitorMessage)
|
||||
http.HandleFunc("/monitor", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
w.Write([]byte("ok"))
|
||||
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -52,6 +51,7 @@ type Manager interface {
|
||||
DockerLogMessageChan() chan []byte
|
||||
MonitorMessageChan() chan [][]byte
|
||||
WebSocketMessageChan(mode, eventID, subID string) chan *db.EventLogMessage
|
||||
NewMonitorMessageChan() chan []byte
|
||||
RealseWebSocketMessageChan(mode, EventID, subID string)
|
||||
Run() error
|
||||
Stop()
|
||||
@ -74,48 +74,53 @@ func NewManager(conf conf.EventStoreConf, log *logrus.Entry) (Manager, error) {
|
||||
return nil, err
|
||||
}
|
||||
storeManager := &storeManager{
|
||||
cancel: cancel,
|
||||
context: ctx,
|
||||
conf: conf,
|
||||
log: log,
|
||||
receiveChan: make(chan []byte, 300),
|
||||
subChan: make(chan [][]byte, 300),
|
||||
pubChan: make(chan [][]byte, 300),
|
||||
dockerLogChan: make(chan []byte, 2048),
|
||||
monitorMessageChan: make(chan [][]byte, 100),
|
||||
chanCacheSize: 100,
|
||||
dbPlugin: dbPlugin,
|
||||
filePlugin: filePlugin,
|
||||
errChan: make(chan error),
|
||||
cancel: cancel,
|
||||
context: ctx,
|
||||
conf: conf,
|
||||
log: log,
|
||||
receiveChan: make(chan []byte, 300),
|
||||
subChan: make(chan [][]byte, 300),
|
||||
pubChan: make(chan [][]byte, 300),
|
||||
dockerLogChan: make(chan []byte, 2048),
|
||||
monitorMessageChan: make(chan [][]byte, 100),
|
||||
newmonitorMessageChan: make(chan []byte, 2048),
|
||||
chanCacheSize: 100,
|
||||
dbPlugin: dbPlugin,
|
||||
filePlugin: filePlugin,
|
||||
errChan: make(chan error),
|
||||
}
|
||||
handle := NewStore("handle", storeManager)
|
||||
read := NewStore("read", storeManager)
|
||||
docker := NewStore("docker_log", storeManager)
|
||||
monitor := NewStore("monitor", storeManager)
|
||||
newmonitor := NewStore("newmonitor", storeManager)
|
||||
storeManager.handleMessageStore = handle
|
||||
storeManager.readMessageStore = read
|
||||
storeManager.dockerLogStore = docker
|
||||
storeManager.monitorMessageStore = monitor
|
||||
storeManager.newmonitorMessageStore = newmonitor
|
||||
return storeManager, nil
|
||||
}
|
||||
|
||||
type storeManager struct {
|
||||
cancel func()
|
||||
context context.Context
|
||||
handleMessageStore MessageStore
|
||||
readMessageStore MessageStore
|
||||
dockerLogStore MessageStore
|
||||
monitorMessageStore MessageStore
|
||||
receiveChan chan []byte
|
||||
pubChan, subChan chan [][]byte
|
||||
dockerLogChan chan []byte
|
||||
monitorMessageChan chan [][]byte
|
||||
chanCacheSize int
|
||||
conf conf.EventStoreConf
|
||||
log *logrus.Entry
|
||||
dbPlugin db.Manager
|
||||
filePlugin db.Manager
|
||||
errChan chan error
|
||||
cancel func()
|
||||
context context.Context
|
||||
handleMessageStore MessageStore
|
||||
readMessageStore MessageStore
|
||||
dockerLogStore MessageStore
|
||||
monitorMessageStore MessageStore
|
||||
newmonitorMessageStore MessageStore
|
||||
receiveChan chan []byte
|
||||
pubChan, subChan chan [][]byte
|
||||
dockerLogChan chan []byte
|
||||
monitorMessageChan chan [][]byte
|
||||
newmonitorMessageChan chan []byte
|
||||
chanCacheSize int
|
||||
conf conf.EventStoreConf
|
||||
log *logrus.Entry
|
||||
dbPlugin db.Manager
|
||||
filePlugin db.Manager
|
||||
errChan chan error
|
||||
}
|
||||
|
||||
//Scrape prometheue monitor metrics
|
||||
@ -188,6 +193,12 @@ func (s *storeManager) MonitorMessageChan() chan [][]byte {
|
||||
}
|
||||
return s.monitorMessageChan
|
||||
}
|
||||
func (s *storeManager) NewMonitorMessageChan() chan []byte {
|
||||
if s.newmonitorMessageChan == nil {
|
||||
s.newmonitorMessageChan = make(chan []byte, 2048)
|
||||
}
|
||||
return s.newmonitorMessageChan
|
||||
}
|
||||
|
||||
func (s *storeManager) WebSocketMessageChan(mode, eventID, subID string) chan *db.EventLogMessage {
|
||||
if mode == "event" {
|
||||
@ -202,6 +213,10 @@ func (s *storeManager) WebSocketMessageChan(mode, eventID, subID string) chan *d
|
||||
ch := s.monitorMessageStore.SubChan(eventID, subID)
|
||||
return ch
|
||||
}
|
||||
if mode == "newmonitor" {
|
||||
ch := s.newmonitorMessageStore.SubChan(eventID, subID)
|
||||
return ch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -211,6 +226,7 @@ func (s *storeManager) Run() error {
|
||||
s.readMessageStore.Run()
|
||||
s.dockerLogStore.Run()
|
||||
s.monitorMessageStore.Run()
|
||||
s.newmonitorMessageStore.Run()
|
||||
for i := 0; i < s.conf.HandleMessageCoreNumber; i++ {
|
||||
go s.handleReceiveMessage()
|
||||
}
|
||||
@ -223,6 +239,7 @@ func (s *storeManager) Run() error {
|
||||
for i := 0; i < s.conf.HandleMessageCoreNumber; i++ {
|
||||
go s.handleMonitorMessage()
|
||||
}
|
||||
go s.handleNewMonitorMessage()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -249,6 +266,34 @@ func (s *storeManager) parsingMessage(msg []byte, messageType string) (*db.Event
|
||||
}
|
||||
return nil, errors.New("Unable to process configuration of message format type.")
|
||||
}
|
||||
|
||||
//handleNewMonitorMessage 处理新监控数据
|
||||
func (s *storeManager) handleNewMonitorMessage() {
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-s.context.Done():
|
||||
return
|
||||
case msg, ok := <-s.newmonitorMessageChan:
|
||||
if !ok {
|
||||
s.log.Error("handle new monitor message core stop.monitor message log chan closed")
|
||||
break loop
|
||||
}
|
||||
if msg == nil {
|
||||
continue
|
||||
}
|
||||
//s.log.Debugf("receive message %s", string(message.Content))
|
||||
if s.conf.ClusterMode {
|
||||
//消息直接集群共享
|
||||
s.pubChan <- [][]byte{[]byte(db.ServiceNewMonitorMessage), msg}
|
||||
}
|
||||
fmt.Println("store mange " + string(msg))
|
||||
s.newmonitorMessageStore.InsertMessage(&db.EventLogMessage{MonitorData: msg})
|
||||
}
|
||||
}
|
||||
s.errChan <- fmt.Errorf("handle monitor log core exist")
|
||||
}
|
||||
|
||||
func (s *storeManager) handleReceiveMessage() {
|
||||
s.log.Debug("event message store manager start handle receive message")
|
||||
loop:
|
||||
@ -313,6 +358,9 @@ func (s *storeManager) handleSubMessage() {
|
||||
if string(msg[0]) == string(db.ServiceMonitorMessage) {
|
||||
s.monitorMessageStore.InsertMessage(message)
|
||||
}
|
||||
if string(msg[0]) == string(db.ServiceNewMonitorMessage) {
|
||||
s.newmonitorMessageStore.InsertMessage(&db.EventLogMessage{MonitorData: msg[1]})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
334
pkg/eventlog/store/new_monitor_message_store.go
Normal file
@ -0,0 +1,334 @@
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/pquerna/ffjson/ffjson"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/conf"
|
||||
"github.com/goodrain/rainbond/pkg/eventlog/db"
|
||||
)
|
||||
|
||||
type newMonitorMessageStore struct {
|
||||
conf conf.EventStoreConf
|
||||
log *logrus.Entry
|
||||
barrels map[string]*CacheMonitorMessageList
|
||||
lock sync.RWMutex
|
||||
cancel func()
|
||||
ctx context.Context
|
||||
size int64
|
||||
allLogCount float64
|
||||
}
|
||||
|
||||
func (h *newMonitorMessageStore) Scrape(ch chan<- prometheus.Metric, namespace, exporter, from string) error {
|
||||
chanDesc := prometheus.NewDesc(
|
||||
prometheus.BuildFQName(namespace, exporter, "new_monitor_store_barrel_count"),
|
||||
"the handle container log count size.",
|
||||
[]string{"from"}, nil,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(chanDesc, prometheus.GaugeValue, float64(len(h.barrels)), from)
|
||||
logDesc := prometheus.NewDesc(
|
||||
prometheus.BuildFQName(namespace, exporter, "new_monitor_store_log_count"),
|
||||
"the handle monitor log count size.",
|
||||
[]string{"from"}, nil,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(logDesc, prometheus.GaugeValue, h.allLogCount, from)
|
||||
|
||||
return nil
|
||||
}
|
||||
func (h *newMonitorMessageStore) insertMessage(message *db.EventLogMessage) ([]MonitorMessage, bool) {
|
||||
h.lock.RLock()
|
||||
defer h.lock.RUnlock()
|
||||
mm := fromByte(message.MonitorData)
|
||||
if len(mm) < 1 {
|
||||
return mm, true
|
||||
}
|
||||
if mm[0].ServiceID == "" {
|
||||
return mm, true
|
||||
}
|
||||
if ba, ok := h.barrels[mm[0].ServiceID]; ok {
|
||||
ba.Insert(mm...)
|
||||
return mm, true
|
||||
}
|
||||
return mm, false
|
||||
}
|
||||
|
||||
func (h *newMonitorMessageStore) InsertMessage(message *db.EventLogMessage) {
|
||||
if message == nil {
|
||||
return
|
||||
}
|
||||
//h.log.Debug("Receive a monitor message:" + string(message.Content))
|
||||
h.size++
|
||||
h.allLogCount++
|
||||
mm, ok := h.insertMessage(message)
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
h.lock.Lock()
|
||||
defer h.lock.Unlock()
|
||||
ba := CreateCacheMonitorMessageList(mm[0].ServiceID)
|
||||
ba.Insert(mm...)
|
||||
h.barrels[mm[0].ServiceID] = ba
|
||||
}
|
||||
func (h *newMonitorMessageStore) GetMonitorData() *db.MonitorData {
|
||||
data := &db.MonitorData{
|
||||
ServiceSize: len(h.barrels),
|
||||
LogSizePeerM: h.size,
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func (h *newMonitorMessageStore) SubChan(eventID, subID string) chan *db.EventLogMessage {
|
||||
h.lock.Lock()
|
||||
defer h.lock.Unlock()
|
||||
if ba, ok := h.barrels[eventID]; ok {
|
||||
return ba.addSubChan(subID)
|
||||
}
|
||||
ba := CreateCacheMonitorMessageList(eventID)
|
||||
h.barrels[eventID] = ba
|
||||
return ba.addSubChan(subID)
|
||||
}
|
||||
func (h *newMonitorMessageStore) RealseSubChan(eventID, subID string) {
|
||||
h.lock.RLock()
|
||||
defer h.lock.RUnlock()
|
||||
if ba, ok := h.barrels[eventID]; ok {
|
||||
ba.delSubChan(subID)
|
||||
}
|
||||
}
|
||||
func (h *newMonitorMessageStore) Run() {
|
||||
go h.Gc()
|
||||
}
|
||||
func (h *newMonitorMessageStore) Gc() {
|
||||
tiker := time.NewTicker(time.Second * 30)
|
||||
for {
|
||||
select {
|
||||
case <-tiker.C:
|
||||
case <-h.ctx.Done():
|
||||
h.log.Debug("read message store gc stop.")
|
||||
tiker.Stop()
|
||||
return
|
||||
}
|
||||
h.size = 0
|
||||
if len(h.barrels) == 0 {
|
||||
continue
|
||||
}
|
||||
var gcEvent []string
|
||||
for k, v := range h.barrels {
|
||||
if v.UpdateTime.Add(time.Minute * 3).Before(time.Now()) { // barrel 超时未收到消息
|
||||
gcEvent = append(gcEvent, k)
|
||||
}
|
||||
}
|
||||
if gcEvent != nil && len(gcEvent) > 0 {
|
||||
for _, id := range gcEvent {
|
||||
barrel := h.barrels[id]
|
||||
barrel.empty()
|
||||
delete(h.barrels, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func (h *newMonitorMessageStore) stop() {
|
||||
h.cancel()
|
||||
}
|
||||
func (h *newMonitorMessageStore) InsertGarbageMessage(message ...*db.EventLogMessage) {}
|
||||
|
||||
//MonitorMessage 性能监控消息系统模型
|
||||
type MonitorMessage struct {
|
||||
ServiceID string
|
||||
Port string
|
||||
HostName string
|
||||
MessageType string //mysql,http ...
|
||||
Key string
|
||||
//总时间
|
||||
CumulativeTime float64
|
||||
AverageTime float64
|
||||
MaxTime float64
|
||||
Count uint64
|
||||
//异常请求次数
|
||||
AbnormalCount uint64
|
||||
}
|
||||
|
||||
//cacheMonitorMessage 每个实例的数据缓存
|
||||
type cacheMonitorMessage struct {
|
||||
updateTime time.Time
|
||||
hostName string
|
||||
mms []MonitorMessage
|
||||
}
|
||||
|
||||
//CacheMonitorMessageList 某个应用性能分析数据
|
||||
type CacheMonitorMessageList struct {
|
||||
list []*cacheMonitorMessage
|
||||
subSocketChan map[string]chan *db.EventLogMessage
|
||||
subLock sync.Mutex
|
||||
message db.EventLogMessage
|
||||
UpdateTime time.Time
|
||||
}
|
||||
|
||||
//CreateCacheMonitorMessageList 创建应用监控信息缓存器
|
||||
func CreateCacheMonitorMessageList(eventID string) *CacheMonitorMessageList {
|
||||
return &CacheMonitorMessageList{
|
||||
subSocketChan: make(map[string]chan *db.EventLogMessage),
|
||||
message: db.EventLogMessage{
|
||||
EventID: eventID,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
//Insert 认为mms的hostname一致
|
||||
//每次收到消息进行gc
|
||||
func (c *CacheMonitorMessageList) Insert(mms ...MonitorMessage) {
|
||||
if mms == nil || len(mms) < 1 {
|
||||
return
|
||||
}
|
||||
c.UpdateTime = time.Now()
|
||||
hostname := mms[0].HostName
|
||||
if len(c.list) == 0 {
|
||||
c.list = []*cacheMonitorMessage{
|
||||
&cacheMonitorMessage{
|
||||
updateTime: time.Now(),
|
||||
hostName: hostname,
|
||||
mms: mms,
|
||||
}}
|
||||
}
|
||||
var update bool
|
||||
for i := range c.list {
|
||||
cm := c.list[i]
|
||||
if cm.hostName == hostname {
|
||||
cm.updateTime = time.Now()
|
||||
cm.mms = mms
|
||||
update = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !update {
|
||||
c.list = append(c.list, &cacheMonitorMessage{
|
||||
updateTime: time.Now(),
|
||||
hostName: hostname,
|
||||
mms: mms,
|
||||
})
|
||||
}
|
||||
c.Gc()
|
||||
c.pushMessage()
|
||||
}
|
||||
|
||||
//Gc 清理数据
|
||||
func (c *CacheMonitorMessageList) Gc() {
|
||||
var list []*cacheMonitorMessage
|
||||
for i := range c.list {
|
||||
cmm := c.list[i]
|
||||
if !cmm.updateTime.Add(time.Minute * 5).Before(time.Now()) {
|
||||
list = append(list, cmm)
|
||||
}
|
||||
}
|
||||
c.list = list
|
||||
}
|
||||
|
||||
func (c *CacheMonitorMessageList) pushMessage() {
|
||||
if len(c.list) == 0 {
|
||||
return
|
||||
}
|
||||
var mdata []byte
|
||||
if len(c.list) == 1 {
|
||||
mdata = getByte(c.list[0].mms)
|
||||
}
|
||||
source := c.list[0].mms
|
||||
for i := 1; i < len(c.list); i++ {
|
||||
addSource := c.list[i].mms
|
||||
source = merge(source, addSource)
|
||||
}
|
||||
mdata = getByte(source)
|
||||
c.message.MonitorData = mdata
|
||||
for _, ch := range c.subSocketChan {
|
||||
select {
|
||||
case ch <- &c.message:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 增加socket订阅
|
||||
func (c *CacheMonitorMessageList) addSubChan(subID string) chan *db.EventLogMessage {
|
||||
c.subLock.Lock()
|
||||
defer c.subLock.Unlock()
|
||||
if sub, ok := c.subSocketChan[subID]; ok {
|
||||
return sub
|
||||
}
|
||||
ch := make(chan *db.EventLogMessage, 10)
|
||||
c.subSocketChan[subID] = ch
|
||||
c.pushMessage()
|
||||
return ch
|
||||
}
|
||||
|
||||
//删除socket订阅
|
||||
func (c *CacheMonitorMessageList) delSubChan(subID string) {
|
||||
c.subLock.Lock()
|
||||
defer c.subLock.Unlock()
|
||||
if _, ok := c.subSocketChan[subID]; ok {
|
||||
delete(c.subSocketChan, subID)
|
||||
}
|
||||
}
|
||||
func (c *CacheMonitorMessageList) empty() {
|
||||
c.subLock.Lock()
|
||||
defer c.subLock.Unlock()
|
||||
for _, v := range c.subSocketChan {
|
||||
close(v)
|
||||
}
|
||||
}
|
||||
func getByte(source []MonitorMessage) []byte {
|
||||
b, _ := ffjson.Marshal(source)
|
||||
return b
|
||||
}
|
||||
func fromByte(source []byte) []MonitorMessage {
|
||||
var mm []MonitorMessage
|
||||
ffjson.Unmarshal(source, &mm)
|
||||
return mm
|
||||
}
|
||||
|
||||
func merge(source, addsource []MonitorMessage) (result []MonitorMessage) {
|
||||
var cache = make(map[string]*MonitorMessage)
|
||||
for _, mm := range source {
|
||||
cache[mm.Key] = &mm
|
||||
}
|
||||
for _, mm := range addsource {
|
||||
if oldmm, ok := cache[mm.Key]; ok {
|
||||
oldmm.Count += mm.Count
|
||||
oldmm.AbnormalCount += mm.AbnormalCount
|
||||
//平均时间
|
||||
oldmm.AverageTime = (oldmm.AverageTime + mm.AverageTime) / 2
|
||||
//累积时间
|
||||
oldmm.CumulativeTime = oldmm.CumulativeTime + mm.CumulativeTime
|
||||
//最大时间
|
||||
if mm.MaxTime > oldmm.MaxTime {
|
||||
oldmm.MaxTime = mm.MaxTime
|
||||
}
|
||||
continue
|
||||
}
|
||||
cache[mm.Key] = &mm
|
||||
}
|
||||
for _, c := range cache {
|
||||
result = append(result, *c)
|
||||
}
|
||||
return
|
||||
}
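Since merge and MonitorMessage live in the store package, a quick smoke test can sit next to this file; it only logs the combined slice, because the order of map iteration (and therefore of the result) is not defined.

package store

import "testing"

// TestMergeSketch feeds two samples for the same key through merge and prints
// the aggregated view; it is a sketch, not an assertion of exact numbers.
func TestMergeSketch(t *testing.T) {
	a := []MonitorMessage{{ServiceID: "test", Key: "/demo", Count: 1, AverageTime: 0.1, MaxTime: 0.1, CumulativeTime: 0.1}}
	b := []MonitorMessage{{ServiceID: "test", Key: "/demo", Count: 2, AverageTime: 0.3, MaxTime: 0.3, CumulativeTime: 0.6}}
	for _, m := range merge(a, b) {
		t.Logf("key=%s count=%d avg=%.2f max=%.2f total=%.2f", m.Key, m.Count, m.AverageTime, m.MaxTime, m.CumulativeTime)
	}
}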
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -141,5 +140,16 @@ func NewStore(storeType string, manager *storeManager) MessageStore {
|
||||
}
|
||||
return docker
|
||||
}
|
||||
if storeType == "newmonitor" {
|
||||
monitor := &newMonitorMessageStore{
|
||||
barrels: make(map[string]*CacheMonitorMessageList, 100),
|
||||
conf: manager.conf,
|
||||
log: manager.log.WithField("module", "NewMonitorMessageStore"),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
return monitor
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -1,19 +1,18 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
@ -29,6 +28,27 @@ import (
|
||||
var urlData = `
|
||||
2017-05-19 11:33:34 APPS SumTimeByUrl [{"tenant":"o2o","service":"zzcplus","url":"/active/js/wx_share.js","avgtime":"1.453","sumtime":"1.453","counts":"1"}]
|
||||
`
|
||||
var newMonitorMessage = `
|
||||
[{"ServiceID":"test",
|
||||
"Port":"5000",
|
||||
"MessageType":"http",
|
||||
"Key":"/test",
|
||||
"CumulativeTime":0.1,
|
||||
"AverageTime":0.1,
|
||||
"MaxTime":0.1,
|
||||
"Count":1,
|
||||
"AbnormalCount":0}
|
||||
,{"ServiceID":"test",
|
||||
"Port":"5000",
|
||||
"MessageType":"http",
|
||||
"Key":"/test2",
|
||||
"CumulativeTime":0.36,
|
||||
"AverageTime":0.18,
|
||||
"MaxTime":0.2,
|
||||
"Count":2,
|
||||
"AbnormalCount":2}
|
||||
]
|
||||
`
|
||||
|
||||
func BenchmarkMonitorServer(t *testing.B) {
|
||||
client, _ := zmq4.NewSocket(zmq4.PUB)
|
||||
|
@ -32,24 +32,28 @@ func TestClient(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
re, err := c.Enqueue(context.Background(), &pb.EnqueueRequest{
|
||||
Topic: "worker",
|
||||
Message: &pb.TaskMessage{
|
||||
TaskType: "stop",
|
||||
CreateTime: time.Now().Format(time.RFC3339),
|
||||
TaskBody: []byte(`{"tenant_id":"232bd923d3794b979974bb21b863608b","service_id":"37f6cc84da449882104687130e868196","deploy_version":"20170717163635","event_id":"system"}`),
|
||||
User: "barnett",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
for i := 0; i < 100000; i++ {
|
||||
re, err := c.Enqueue(context.Background(), &pb.EnqueueRequest{
|
||||
Topic: "worker",
|
||||
Message: &pb.TaskMessage{
|
||||
TaskType: "stop",
|
||||
CreateTime: time.Now().Format(time.RFC3339),
|
||||
TaskBody: []byte(`{"tenant_id":"232bd923d3794b979974bb21b863608b","service_id":"37f6cc84da449882104687130e868196","deploy_version":"20170717163635","event_id":"system"}`),
|
||||
User: "barnett",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(re)
|
||||
taskme, err := c.Dequeue(context.Background(), &pb.DequeueRequest{Topic: "worker"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(taskme)
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
t.Log(re)
|
||||
taskme, err := c.Dequeue(context.Background(), &pb.DequeueRequest{Topic: "worker"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Log(taskme)
|
||||
|
||||
}
|
||||
|
||||
func TestClientScaling(t *testing.T) {
|
||||
|
@ -1,31 +1,31 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package mq
|
||||
|
||||
import (
|
||||
"github.com/goodrain/rainbond/cmd/mq/option"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/goodrain/rainbond/cmd/mq/option"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
etcdutil "github.com/goodrain/rainbond/pkg/util/etcd"
|
||||
@ -56,10 +56,10 @@ func NewActionMQ(ctx context.Context, c option.Config) ActionMQ {
|
||||
|
||||
type etcdQueue struct {
|
||||
config option.Config
|
||||
client *clientv3.Client
|
||||
ctx context.Context
|
||||
queues map[string]string
|
||||
queuesLock sync.Mutex
|
||||
client *clientv3.Client
|
||||
}
|
||||
|
||||
func (e *etcdQueue) Start() error {
|
||||
@ -109,7 +109,7 @@ func (e *etcdQueue) GetAllTopics() []string {
|
||||
|
||||
func (e *etcdQueue) Stop() error {
|
||||
if e.client != nil {
|
||||
return e.client.Close()
|
||||
e.client.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -25,6 +25,9 @@ import (
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/discover"
|
||||
"github.com/goodrain/rainbond/pkg/node/masterserver"
|
||||
"github.com/goodrain/rainbond/pkg/node/statsd"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
"github.com/goodrain/rainbond/pkg/node/api/controller"
|
||||
"github.com/goodrain/rainbond/pkg/node/api/model"
|
||||
@ -52,21 +55,52 @@ type Manager struct {
|
||||
lID client.LeaseID // lease id
|
||||
ms *masterserver.MasterServer
|
||||
keepalive *discover.KeepAlive
|
||||
exporter *statsd.Exporter
|
||||
}
|
||||
|
||||
//NewManager api manager
|
||||
func NewManager(c option.Conf, node *model.HostNode, ms *masterserver.MasterServer) *Manager {
|
||||
func NewManager(c option.Conf, node *model.HostNode, ms *masterserver.MasterServer, exporter *statsd.Exporter) *Manager {
|
||||
r := router.Routers(c.RunMode)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
controller.Init(&c, ms)
|
||||
return &Manager{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
conf: c,
|
||||
router: r,
|
||||
node: node,
|
||||
ms: ms,
|
||||
m := &Manager{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
conf: c,
|
||||
router: r,
|
||||
node: node,
|
||||
ms: ms,
|
||||
exporter: exporter,
|
||||
}
|
||||
m.router.Get("/app/metrics", m.HandleStatsd)
|
||||
m.router.Get("/-/statsdreload", m.ReloadStatsdMappConfig)
|
||||
return m
|
||||
}
|
||||
|
||||
//ReloadStatsdMappConfig ReloadStatsdMappConfig
|
||||
func (m *Manager) ReloadStatsdMappConfig(w http.ResponseWriter, r *http.Request) {
|
||||
if err := m.exporter.ReloadConfig(); err != nil {
|
||||
w.Write([]byte(err.Error()))
|
||||
w.WriteHeader(500)
|
||||
} else {
|
||||
w.Write([]byte("Success reload"))
|
||||
w.WriteHeader(200)
|
||||
}
|
||||
}
|
||||
|
||||
//HandleStatsd statsd handle
|
||||
func (m *Manager) HandleStatsd(w http.ResponseWriter, r *http.Request) {
|
||||
gatherers := prometheus.Gatherers{
|
||||
prometheus.DefaultGatherer,
|
||||
m.exporter.GetRegister(),
|
||||
}
|
||||
// Delegate http serving to Prometheus client library, which will call collector.Collect.
|
||||
h := promhttp.HandlerFor(gatherers,
|
||||
promhttp.HandlerOpts{
|
||||
ErrorLog: logrus.StandardLogger(),
|
||||
ErrorHandling: promhttp.ContinueOnError,
|
||||
})
|
||||
h.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
//Start 启动
|
||||
|
@ -76,7 +76,6 @@ func Routers(mode string) *chi.Mux {
|
||||
r.Get("/{node_ip}/init", controller.InitStatus)
|
||||
r.Post("/{node_id}/install", controller.Install)
|
||||
|
||||
|
||||
r.Get("/{node_id}/prometheus/cpu", controller.GetCpu)
|
||||
r.Get("/{node_id}/prometheus/mem", controller.GetMem)
|
||||
r.Get("/{node_id}/prometheus/disk", controller.GetDisk)
|
||||
@ -88,7 +87,6 @@ func Routers(mode string) *chi.Mux {
|
||||
r.Put("/prometheus/expr", controller.GetExpr)
|
||||
r.Put("/prometheus/start/{start}/end/{end}/step/{step}/expr", controller.GetExpr)
|
||||
|
||||
|
||||
})
|
||||
|
||||
//TODO:
|
||||
@ -121,6 +119,6 @@ func Routers(mode string) *chi.Mux {
|
||||
}
|
||||
})
|
||||
//节点监控
|
||||
r.Get("/metrics", controller.NodeExporter)
|
||||
r.Get("/node/metrics", controller.NodeExporter)
|
||||
return r
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ func (c *Client) Post(key, val string, opts ...client.OpOption) (*client.PutResp
|
||||
if !txnresp.Succeeded {
|
||||
return nil, ErrKeyExists
|
||||
}
|
||||
return txnresp.ToOpResponse().Put(), nil
|
||||
return txnresp.OpResponse().Put(), nil
|
||||
}
|
||||
|
||||
//Put etcd v3 Put
|
||||
@ -162,6 +162,11 @@ func (c *Client) Watch(key string, opts ...client.OpOption) client.WatchChan {
|
||||
return c.Client.Watch(context.Background(), key, opts...)
|
||||
}
|
||||
|
||||
//WatchByCtx watch by ctx
|
||||
func (c *Client) WatchByCtx(ctx context.Context, key string, opts ...client.OpOption) client.WatchChan {
|
||||
return c.Client.Watch(ctx, key, opts...)
|
||||
}
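WatchByCtx lets the caller bound a watch with its own context instead of the background one; a standalone sketch against raw clientv3 (endpoint is a placeholder) that gives up after three seconds, which is the pattern the scheduler change below relies on:

package main

import (
	"context"
	"log"
	"time"

	client "github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}}) // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	// The watch channel closes once ctx expires, so this loop terminates.
	for resp := range cli.Watch(ctx, "/rainbond/task/scheduler/authority") {
		for _, ev := range resp.Events {
			log.Printf("event %s on %s", ev.Type, ev.Kv.Key)
		}
	}
}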
|
||||
|
||||
//KeepAliveOnce etcd v3 KeepAliveOnce
|
||||
func (c *Client) KeepAliveOnce(id client.LeaseID) (*client.LeaseKeepAliveResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), c.reqTimeout)
|
||||
|
@ -142,15 +142,18 @@ func (t *TaskEngine) haveMaster() (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
if !resp.Succeeded {
|
||||
ch := store.DefalutClient.Watch("/rainbond/task/scheduler/authority")
|
||||
ctx, cancel := context.WithTimeout(t.ctx, time.Second*3)
|
||||
ch := store.DefalutClient.WatchByCtx(ctx, "/rainbond/task/scheduler/authority")
|
||||
for {
|
||||
select {
|
||||
case <-t.ctx.Done():
|
||||
cancel()
|
||||
return false, nil
|
||||
case events := <-ch:
|
||||
for _, event := range events.Events {
|
||||
//watch 到删除操作,返回去获取权限
|
||||
if event.Type == client.EventTypeDelete {
|
||||
cancel()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
132
pkg/node/monitormessage/udpserver.go
Normal file
@ -0,0 +1,132 @@
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package monitormessage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/goodrain/rainbond/pkg/discover"
|
||||
"github.com/goodrain/rainbond/pkg/discover/config"
|
||||
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
//UDPServer udp server
|
||||
type UDPServer struct {
|
||||
ListenerHost string
|
||||
ListenerPort int
|
||||
eventServerEndpoint []string
|
||||
client net.Conn
|
||||
}
|
||||
|
||||
//CreateUDPServer create udpserver
|
||||
func CreateUDPServer(lisHost string, lisPort int) *UDPServer {
|
||||
return &UDPServer{
|
||||
ListenerHost: lisHost,
|
||||
ListenerPort: lisPort,
|
||||
}
|
||||
}
|
||||
|
||||
//Start start
|
||||
func (u *UDPServer) Start() error {
|
||||
dis, err := discover.GetDiscover(config.DiscoverConfig{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dis.AddProject("event_log_event_udp", u)
|
||||
if err := u.server(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//UpdateEndpoints update event server address
|
||||
func (u *UDPServer) UpdateEndpoints(endpoints ...*config.Endpoint) {
|
||||
var eventServerEndpoint []string
|
||||
for _, e := range endpoints {
|
||||
eventServerEndpoint = append(eventServerEndpoint, e.URL)
|
||||
u.eventServerEndpoint = eventServerEndpoint
|
||||
}
|
||||
if len(u.eventServerEndpoint) > 0 {
|
||||
for i := range u.eventServerEndpoint {
|
||||
info := strings.Split(u.eventServerEndpoint[i], ":")
|
||||
if len(info) == 2 {
|
||||
dip := net.ParseIP(info[0])
|
||||
port, err := strconv.Atoi(info[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
srcAddr := &net.UDPAddr{IP: net.IPv4zero, Port: 0}
|
||||
dstAddr := &net.UDPAddr{IP: dip, Port: port}
|
||||
conn, err := net.DialUDP("udp", srcAddr, dstAddr)
|
||||
if err != nil {
|
||||
logrus.Error(err)
|
||||
continue
|
||||
}
|
||||
logrus.Infof("Update event server address is %s", u.eventServerEndpoint[i])
|
||||
u.client = conn
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
//Error
|
||||
func (u *UDPServer) Error(err error) {
|
||||
|
||||
}
|
||||
|
||||
//Server 服务
|
||||
func (u *UDPServer) server() error {
|
||||
listener, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP(u.ListenerHost), Port: u.ListenerPort})
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return err
|
||||
}
|
||||
log.Infof("UDP Server Listener: %s", listener.LocalAddr().String())
|
||||
buf := make([]byte, 65535)
|
||||
go func() {
|
||||
defer listener.Close()
|
||||
for {
|
||||
n, _, err := listener.ReadFromUDP(buf)
|
||||
if err != nil {
|
||||
logrus.Errorf("read message from udp error,%s", err.Error())
|
||||
time.Sleep(time.Second * 2)
|
||||
continue
|
||||
}
|
||||
u.handlePacket(buf[0:n])
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *UDPServer) handlePacket(packet []byte) {
|
||||
lines := strings.Split(string(packet), "\n")
|
||||
for _, line := range lines {
|
||||
if line != "" && u.client != nil {
|
||||
u.client.Write([]byte(line))
|
||||
}
|
||||
}
|
||||
}
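On the node side, anything written to this UDP port is split into lines and each non-empty line is relayed verbatim to the discovered eventlog endpoint, so a sender writes one JSON document per line; a minimal sketch (the port is whatever the node was configured with):

package main

import (
	"log"
	"net"
)

func main() {
	conn, err := net.Dial("udp", "127.0.0.1:6666") // placeholder node monitor port
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// One JSON array of monitor messages per line; handlePacket forwards each
	// line separately to the eventlog UDP server.
	line := `[{"ServiceID":"test","Port":"5000","MessageType":"http","Key":"/demo","Count":1,"AverageTime":0.1,"MaxTime":0.1,"CumulativeTime":0.1,"AbnormalCount":0}]` + "\n"
	if _, err := conn.Write([]byte(line)); err != nil {
		log.Fatal(err)
	}
}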
|
201
pkg/node/statsd/exporter/LICENSE
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
60
pkg/node/statsd/exporter/Makefile
Normal file
@ -0,0 +1,60 @@
# Copyright 2013 The Prometheus Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GO    := GO15VENDOREXPERIMENT=1 go
PROMU := $(GOPATH)/bin/promu
pkgs   = $(shell $(GO) list ./... | grep -v /vendor/)

PREFIX            ?= $(shell pwd)
BIN_DIR           ?= $(shell pwd)
DOCKER_IMAGE_NAME ?= statsd-exporter
DOCKER_IMAGE_TAG  ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))


all: format build test

style:
	@echo ">> checking code style"
	@! gofmt -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'

test:
	@echo ">> running tests"
	@$(GO) test -short $(pkgs)

format:
	@echo ">> formatting code"
	@$(GO) fmt $(pkgs)

vet:
	@echo ">> vetting code"
	@$(GO) vet $(pkgs)

build: promu
	@echo ">> building binaries"
	@$(PROMU) build --prefix $(PREFIX)

tarball: promu
	@echo ">> building release tarball"
	@$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

docker:
	@echo ">> building docker image"
	@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .

promu:
	@GOOS=$(shell uname -s | tr A-Z a-z) \
	GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
	$(GO) get -u github.com/prometheus/promu


.PHONY: all style format build test vet tarball docker promu
595
pkg/node/statsd/exporter/exporter.go
Normal file
@ -0,0 +1,595 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package exporter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"net"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHelp = "Metric autogenerated by statsd_exporter."
|
||||
regErrF = "A change of configuration created inconsistent metrics for " +
|
||||
"%q. You have to restart the statsd_exporter, and you should " +
|
||||
"consider the effects on your monitoring setup. Error: %s"
|
||||
)
|
||||
|
||||
var (
|
||||
illegalCharsRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
|
||||
|
||||
hash = fnv.New64a()
|
||||
strBuf bytes.Buffer // Used for hashing.
|
||||
intBuf = make([]byte, 8)
|
||||
)
|
||||
|
||||
// hashNameAndLabels returns a hash value of the provided name string and all
|
||||
// the label names and values in the provided labels map.
|
||||
//
|
||||
// Not safe for concurrent use! (Uses a shared buffer and hasher to save on
|
||||
// allocations.)
|
||||
func hashNameAndLabels(name string, labels prometheus.Labels) uint64 {
|
||||
hash.Reset()
|
||||
strBuf.Reset()
|
||||
strBuf.WriteString(name)
|
||||
hash.Write(strBuf.Bytes())
|
||||
binary.BigEndian.PutUint64(intBuf, model.LabelsToSignature(labels))
|
||||
hash.Write(intBuf)
|
||||
return hash.Sum64()
|
||||
}
|
||||
|
||||
type CounterContainer struct {
|
||||
Elements map[uint64]prometheus.Counter
|
||||
Register prometheus.Registerer
|
||||
}
|
||||
|
||||
func NewCounterContainer(Register prometheus.Registerer) *CounterContainer {
|
||||
return &CounterContainer{
|
||||
Elements: make(map[uint64]prometheus.Counter),
|
||||
Register: Register,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CounterContainer) Get(metricName string, labels prometheus.Labels, help string) (prometheus.Counter, error) {
|
||||
hash := hashNameAndLabels(metricName, labels)
|
||||
counter, ok := c.Elements[hash]
|
||||
if !ok {
|
||||
counter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: metricName,
|
||||
Help: help,
|
||||
ConstLabels: labels,
|
||||
})
|
||||
if err := c.Register.Register(counter); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.Elements[hash] = counter
|
||||
}
|
||||
return counter, nil
|
||||
}
|
||||
|
||||
type GaugeContainer struct {
|
||||
Elements map[uint64]prometheus.Gauge
|
||||
Register prometheus.Registerer
|
||||
}
|
||||
|
||||
func NewGaugeContainer(Register prometheus.Registerer) *GaugeContainer {
|
||||
return &GaugeContainer{
|
||||
Elements: make(map[uint64]prometheus.Gauge),
|
||||
Register: Register,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *GaugeContainer) Get(metricName string, labels prometheus.Labels, help string) (prometheus.Gauge, error) {
|
||||
hash := hashNameAndLabels(metricName, labels)
|
||||
gauge, ok := c.Elements[hash]
|
||||
if !ok {
|
||||
gauge = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: metricName,
|
||||
Help: help,
|
||||
ConstLabels: labels,
|
||||
})
|
||||
if err := c.Register.Register(gauge); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.Elements[hash] = gauge
|
||||
}
|
||||
return gauge, nil
|
||||
}
|
||||
|
||||
type SummaryContainer struct {
|
||||
Elements map[uint64]prometheus.Summary
|
||||
Register prometheus.Registerer
|
||||
}
|
||||
|
||||
func NewSummaryContainer(Register prometheus.Registerer) *SummaryContainer {
|
||||
return &SummaryContainer{
|
||||
Elements: make(map[uint64]prometheus.Summary),
|
||||
Register: Register,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *SummaryContainer) Get(metricName string, labels prometheus.Labels, help string) (prometheus.Summary, error) {
|
||||
hash := hashNameAndLabels(metricName, labels)
|
||||
summary, ok := c.Elements[hash]
|
||||
if !ok {
|
||||
summary = prometheus.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: metricName,
|
||||
Help: help,
|
||||
ConstLabels: labels,
|
||||
})
|
||||
if err := c.Register.Register(summary); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.Elements[hash] = summary
|
||||
}
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
type HistogramContainer struct {
|
||||
Elements map[uint64]prometheus.Histogram
|
||||
mapper *MetricMapper
|
||||
Register prometheus.Registerer
|
||||
}
|
||||
|
||||
func NewHistogramContainer(mapper *MetricMapper, Register prometheus.Registerer) *HistogramContainer {
|
||||
return &HistogramContainer{
|
||||
Elements: make(map[uint64]prometheus.Histogram),
|
||||
mapper: mapper,
|
||||
Register: Register,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *HistogramContainer) Get(metricName string, labels prometheus.Labels, help string, mapping *metricMapping) (prometheus.Histogram, error) {
|
||||
hash := hashNameAndLabels(metricName, labels)
|
||||
histogram, ok := c.Elements[hash]
|
||||
if !ok {
|
||||
buckets := c.mapper.Defaults.Buckets
|
||||
if mapping != nil && mapping.Buckets != nil && len(mapping.Buckets) > 0 {
|
||||
buckets = mapping.Buckets
|
||||
}
|
||||
histogram = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: metricName,
|
||||
Help: help,
|
||||
ConstLabels: labels,
|
||||
Buckets: buckets,
|
||||
})
|
||||
c.Elements[hash] = histogram
|
||||
if err := c.Register.Register(histogram); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return histogram, nil
|
||||
}
|
||||
|
||||
type Event interface {
|
||||
MetricName() string
|
||||
Value() float64
|
||||
Labels() map[string]string
|
||||
}
|
||||
|
||||
type CounterEvent struct {
|
||||
metricName string
|
||||
value float64
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
func (c *CounterEvent) MetricName() string { return c.metricName }
|
||||
func (c *CounterEvent) Value() float64 { return c.value }
|
||||
func (c *CounterEvent) Labels() map[string]string { return c.labels }
|
||||
|
||||
type GaugeEvent struct {
|
||||
metricName string
|
||||
value float64
|
||||
relative bool
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
func (g *GaugeEvent) MetricName() string { return g.metricName }
|
||||
func (g *GaugeEvent) Value() float64 { return g.value }
|
||||
func (c *GaugeEvent) Labels() map[string]string { return c.labels }
|
||||
|
||||
type TimerEvent struct {
|
||||
metricName string
|
||||
value float64
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
func (t *TimerEvent) MetricName() string { return t.metricName }
|
||||
func (t *TimerEvent) Value() float64 { return t.value }
|
||||
func (c *TimerEvent) Labels() map[string]string { return c.labels }
|
||||
|
||||
type Events []Event
|
||||
|
||||
type Exporter struct {
|
||||
Counters *CounterContainer
|
||||
Gauges *GaugeContainer
|
||||
Summaries *SummaryContainer
|
||||
Histograms *HistogramContainer
|
||||
mapper *MetricMapper
|
||||
}
|
||||
|
||||
func escapeMetricName(metricName string) string {
|
||||
// If a metric starts with a digit, prepend an underscore.
|
||||
if metricName[0] >= '0' && metricName[0] <= '9' {
|
||||
metricName = "_" + metricName
|
||||
}
|
||||
|
||||
// Replace all illegal metric chars with underscores.
|
||||
metricName = illegalCharsRE.ReplaceAllString(metricName, "_")
|
||||
return metricName
|
||||
}
|
||||
|
||||
func (b *Exporter) Listen(e <-chan Events) {
|
||||
for {
|
||||
events, ok := <-e
|
||||
if !ok {
|
||||
log.Debug("Channel is closed. Break out of Exporter.Listener.")
|
||||
return
|
||||
}
|
||||
for _, event := range events {
|
||||
var help string
|
||||
metricName := ""
|
||||
prometheusLabels := event.Labels()
|
||||
|
||||
mapping, labels, present := b.mapper.getMapping(event.MetricName())
|
||||
if mapping == nil {
|
||||
mapping = &metricMapping{}
|
||||
}
|
||||
if mapping.HelpText == "" {
|
||||
help = defaultHelp
|
||||
} else {
|
||||
help = mapping.HelpText
|
||||
}
|
||||
if present {
|
||||
metricName = mapping.Name
|
||||
for label, value := range labels {
|
||||
prometheusLabels[label] = value
|
||||
}
|
||||
} else {
|
||||
eventsUnmapped.Inc()
|
||||
metricName = escapeMetricName(event.MetricName())
|
||||
}
|
||||
|
||||
switch ev := event.(type) {
|
||||
case *CounterEvent:
|
||||
// We don't accept negative values for counters. Incrementing the counter with a negative number
|
||||
// will cause the exporter to panic. Instead we will warn and continue to the next event.
|
||||
if event.Value() < 0.0 {
|
||||
log.Debugf("Counter %q is: '%f' (counter must be non-negative value)", metricName, event.Value())
|
||||
eventStats.WithLabelValues("illegal_negative_counter").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
counter, err := b.Counters.Get(
|
||||
metricName,
|
||||
prometheusLabels,
|
||||
help,
|
||||
)
|
||||
if err == nil {
|
||||
counter.Add(event.Value())
|
||||
|
||||
eventStats.WithLabelValues("counter").Inc()
|
||||
} else {
|
||||
log.Debugf(regErrF, metricName, err)
|
||||
conflictingEventStats.WithLabelValues("counter").Inc()
|
||||
}
|
||||
|
||||
case *GaugeEvent:
|
||||
gauge, err := b.Gauges.Get(
|
||||
metricName,
|
||||
prometheusLabels,
|
||||
help,
|
||||
)
|
||||
|
||||
if err == nil {
|
||||
if ev.relative {
|
||||
gauge.Add(event.Value())
|
||||
} else {
|
||||
gauge.Set(event.Value())
|
||||
}
|
||||
|
||||
eventStats.WithLabelValues("gauge").Inc()
|
||||
} else {
|
||||
log.Debugf(regErrF, metricName, err)
|
||||
conflictingEventStats.WithLabelValues("gauge").Inc()
|
||||
}
|
||||
|
||||
case *TimerEvent:
|
||||
t := timerTypeDefault
|
||||
if mapping != nil {
|
||||
t = mapping.TimerType
|
||||
}
|
||||
if t == timerTypeDefault {
|
||||
t = b.mapper.Defaults.TimerType
|
||||
}
|
||||
|
||||
switch t {
|
||||
case timerTypeHistogram:
|
||||
histogram, err := b.Histograms.Get(
|
||||
metricName,
|
||||
prometheusLabels,
|
||||
help,
|
||||
mapping,
|
||||
)
|
||||
if err == nil {
|
||||
histogram.Observe(event.Value() / 1000) // prometheus presumes seconds, statsd millisecond
|
||||
eventStats.WithLabelValues("timer").Inc()
|
||||
} else {
|
||||
log.Debugf(regErrF, metricName, err)
|
||||
conflictingEventStats.WithLabelValues("timer").Inc()
|
||||
}
|
||||
|
||||
case timerTypeDefault, timerTypeSummary:
|
||||
summary, err := b.Summaries.Get(
|
||||
metricName,
|
||||
prometheusLabels,
|
||||
help,
|
||||
)
|
||||
if err == nil {
|
||||
summary.Observe(event.Value())
|
||||
eventStats.WithLabelValues("timer").Inc()
|
||||
} else {
|
||||
log.Debugf(regErrF, metricName, err)
|
||||
conflictingEventStats.WithLabelValues("timer").Inc()
|
||||
}
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown timer type '%s'", t))
|
||||
}
|
||||
|
||||
default:
|
||||
log.Debugln("Unsupported event type")
|
||||
eventStats.WithLabelValues("illegal").Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//NewExporter new exporter
|
||||
func NewExporter(mapper *MetricMapper, Register prometheus.Registerer) *Exporter {
|
||||
return &Exporter{
|
||||
Counters: NewCounterContainer(Register),
|
||||
Gauges: NewGaugeContainer(Register),
|
||||
Summaries: NewSummaryContainer(Register),
|
||||
Histograms: NewHistogramContainer(mapper, Register),
|
||||
mapper: mapper,
|
||||
}
|
||||
}
|
||||
|
||||
func buildEvent(statType, metric string, value float64, relative bool, labels map[string]string) (Event, error) {
|
||||
switch statType {
|
||||
case "c":
|
||||
return &CounterEvent{
|
||||
metricName: metric,
|
||||
value: float64(value),
|
||||
labels: labels,
|
||||
}, nil
|
||||
case "g":
|
||||
return &GaugeEvent{
|
||||
metricName: metric,
|
||||
value: float64(value),
|
||||
relative: relative,
|
||||
labels: labels,
|
||||
}, nil
|
||||
case "ms", "h":
|
||||
return &TimerEvent{
|
||||
metricName: metric,
|
||||
value: float64(value),
|
||||
labels: labels,
|
||||
}, nil
|
||||
case "s":
|
||||
return nil, fmt.Errorf("No support for StatsD sets")
|
||||
default:
|
||||
return nil, fmt.Errorf("Bad stat type %s", statType)
|
||||
}
|
||||
}
|
||||
|
||||
func parseDogStatsDTagsToLabels(component string) map[string]string {
|
||||
labels := map[string]string{}
|
||||
tagsReceived.Inc()
|
||||
tags := strings.Split(component, ",")
|
||||
for _, t := range tags {
|
||||
t = strings.TrimPrefix(t, "#")
|
||||
kv := strings.SplitN(t, ":", 2)
|
||||
|
||||
if len(kv) < 2 || len(kv[1]) == 0 {
|
||||
tagErrors.Inc()
|
||||
log.Debugf("Malformed or empty DogStatsD tag %s in component %s", t, component)
|
||||
continue
|
||||
}
|
||||
|
||||
labels[escapeMetricName(kv[0])] = kv[1]
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
func lineToEvents(line string) Events {
|
||||
events := Events{}
|
||||
if line == "" {
|
||||
return events
|
||||
}
|
||||
|
||||
elements := strings.SplitN(line, ":", 2)
|
||||
if len(elements) < 2 || len(elements[0]) == 0 || !utf8.ValidString(line) {
|
||||
sampleErrors.WithLabelValues("malformed_line").Inc()
|
||||
log.Debugln("Bad line from StatsD:", line)
|
||||
return events
|
||||
}
|
||||
metric := elements[0]
|
||||
var samples []string
|
||||
if strings.Contains(elements[1], "|#") {
|
||||
// using datadog extensions, disable multi-metrics
|
||||
samples = elements[1:]
|
||||
} else {
|
||||
samples = strings.Split(elements[1], ":")
|
||||
}
|
||||
samples:
|
||||
for _, sample := range samples {
|
||||
samplesReceived.Inc()
|
||||
components := strings.Split(sample, "|")
|
||||
samplingFactor := 1.0
|
||||
if len(components) < 2 || len(components) > 4 {
|
||||
sampleErrors.WithLabelValues("malformed_component").Inc()
|
||||
log.Debugln("Bad component on line:", line)
|
||||
continue
|
||||
}
|
||||
valueStr, statType := components[0], components[1]
|
||||
|
||||
var relative = false
|
||||
if strings.Index(valueStr, "+") == 0 || strings.Index(valueStr, "-") == 0 {
|
||||
relative = true
|
||||
}
|
||||
|
||||
value, err := strconv.ParseFloat(valueStr, 64)
|
||||
if err != nil {
|
||||
log.Debugf("Bad value %s on line: %s", valueStr, line)
|
||||
sampleErrors.WithLabelValues("malformed_value").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
multiplyEvents := 1
|
||||
labels := map[string]string{}
|
||||
if len(components) >= 3 {
|
||||
for _, component := range components[2:] {
|
||||
if len(component) == 0 {
|
||||
log.Debugln("Empty component on line: ", line)
|
||||
sampleErrors.WithLabelValues("malformed_component").Inc()
|
||||
continue samples
|
||||
}
|
||||
}
|
||||
|
||||
for _, component := range components[2:] {
|
||||
switch component[0] {
|
||||
case '@':
|
||||
if statType != "c" && statType != "ms" {
|
||||
log.Debugln("Illegal sampling factor for non-counter metric on line", line)
|
||||
sampleErrors.WithLabelValues("illegal_sample_factor").Inc()
|
||||
continue
|
||||
}
|
||||
samplingFactor, err = strconv.ParseFloat(component[1:], 64)
|
||||
if err != nil {
|
||||
log.Debugf("Invalid sampling factor %s on line %s", component[1:], line)
|
||||
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
|
||||
}
|
||||
if samplingFactor == 0 {
|
||||
samplingFactor = 1
|
||||
}
|
||||
|
||||
if statType == "c" {
|
||||
value /= samplingFactor
|
||||
} else if statType == "ms" {
|
||||
multiplyEvents = int(1 / samplingFactor)
|
||||
}
|
||||
case '#':
|
||||
labels = parseDogStatsDTagsToLabels(component)
|
||||
default:
|
||||
log.Debugf("Invalid sampling factor or tag section %s on line %s", components[2], line)
|
||||
sampleErrors.WithLabelValues("invalid_sample_factor").Inc()
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < multiplyEvents; i++ {
|
||||
event, err := buildEvent(statType, metric, value, relative, labels)
|
||||
if err != nil {
|
||||
log.Debugf("Error building event on line %s: %s", line, err)
|
||||
sampleErrors.WithLabelValues("illegal_event").Inc()
|
||||
continue
|
||||
}
|
||||
events = append(events, event)
|
||||
}
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
type StatsDUDPListener struct {
|
||||
Conn *net.UDPConn
|
||||
}
|
||||
|
||||
func (l *StatsDUDPListener) Listen(e chan<- Events) {
|
||||
buf := make([]byte, 65535)
|
||||
for {
|
||||
n, _, err := l.Conn.ReadFromUDP(buf)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
l.handlePacket(buf[0:n], e)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *StatsDUDPListener) handlePacket(packet []byte, e chan<- Events) {
|
||||
udpPackets.Inc()
|
||||
lines := strings.Split(string(packet), "\n")
|
||||
events := Events{}
|
||||
for _, line := range lines {
|
||||
linesReceived.Inc()
|
||||
events = append(events, lineToEvents(line)...)
|
||||
}
|
||||
e <- events
|
||||
}
|
||||
|
||||
type StatsDTCPListener struct {
|
||||
Conn *net.TCPListener
|
||||
}
|
||||
|
||||
func (l *StatsDTCPListener) Listen(e chan<- Events) {
|
||||
defer l.Conn.Close()
|
||||
for {
|
||||
c, err := l.Conn.AcceptTCP()
|
||||
if err != nil {
|
||||
log.Fatalf("AcceptTCP failed: %v", err)
|
||||
}
|
||||
go l.handleConn(c, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *StatsDTCPListener) handleConn(c *net.TCPConn, e chan<- Events) {
|
||||
defer c.Close()
|
||||
|
||||
tcpConnections.Inc()
|
||||
|
||||
r := bufio.NewReader(c)
|
||||
for {
|
||||
line, isPrefix, err := r.ReadLine()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
tcpErrors.Inc()
|
||||
log.Debugf("Read %s failed: %v", c.RemoteAddr(), err)
|
||||
}
|
||||
break
|
||||
}
|
||||
if isPrefix {
|
||||
tcpLineTooLong.Inc()
|
||||
log.Debugf("Read %s failed: line too long", c.RemoteAddr())
|
||||
break
|
||||
}
|
||||
linesReceived.Inc()
|
||||
e <- lineToEvents(string(line))
|
||||
}
|
||||
}
|
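A minimal sketch (a hypothetical test in this exporter package) of the StatsD line shape that lineToEvents parses, "<metric>:<value>|<type>[|@<rate>][|#<tags>]"; the metric name and value are illustrative:

package exporter

import "testing"

// A plain counter sample should yield exactly one CounterEvent carrying the
// raw metric name and the parsed value.
func TestLineToEvents(t *testing.T) {
	events := lineToEvents("svc123.5000.http.request.GET:1|c")
	if len(events) != 1 {
		t.Fatalf("expected one event, got %d", len(events))
	}
	if events[0].MetricName() != "svc123.5000.http.request.GET" || events[0].Value() != 1 {
		t.Fatalf("unexpected event: %#v", events[0])
	}
}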
158
pkg/node/statsd/exporter/mapper.go
Normal file
@ -0,0 +1,158 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package exporter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
identifierRE = `[a-zA-Z_][a-zA-Z0-9_]+`
|
||||
statsdMetricRE = `[a-zA-Z_](-?[a-zA-Z0-9_])+`
|
||||
|
||||
metricLineRE = regexp.MustCompile(`^(\*\.|` + statsdMetricRE + `\.)+(\*|` + statsdMetricRE + `)$`)
|
||||
labelLineRE = regexp.MustCompile(`^(` + identifierRE + `)\s*=\s*"(.*)"$`)
|
||||
metricNameRE = regexp.MustCompile(`^` + identifierRE + `$`)
|
||||
)
|
||||
|
||||
type mapperConfigDefaults struct {
|
||||
TimerType timerType `yaml:"timer_type"`
|
||||
Buckets []float64 `yaml:"buckets"`
|
||||
MatchType matchType `yaml:"match_type"`
|
||||
}
|
||||
|
||||
//MetricMapper MetricMapper
|
||||
type MetricMapper struct {
|
||||
Defaults mapperConfigDefaults `yaml:"defaults"`
|
||||
Mappings []metricMapping `yaml:"mappings"`
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
type metricMapping struct {
|
||||
Match string `yaml:"match"`
|
||||
Name string `yaml:"name"`
|
||||
regex *regexp.Regexp
|
||||
Labels prometheus.Labels `yaml:"labels"`
|
||||
TimerType timerType `yaml:"timer_type"`
|
||||
Buckets []float64 `yaml:"buckets"`
|
||||
MatchType matchType `yaml:"match_type"`
|
||||
HelpText string `yaml:"help"`
|
||||
}
|
||||
|
||||
func (m *MetricMapper) InitFromYAMLString(fileContents string) error {
|
||||
var n MetricMapper
|
||||
|
||||
if err := yaml.Unmarshal([]byte(fileContents), &n); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n.Defaults.Buckets == nil || len(n.Defaults.Buckets) == 0 {
|
||||
n.Defaults.Buckets = prometheus.DefBuckets
|
||||
}
|
||||
|
||||
if n.Defaults.MatchType == matchTypeDefault {
|
||||
n.Defaults.MatchType = matchTypeGlob
|
||||
}
|
||||
|
||||
for i := range n.Mappings {
|
||||
currentMapping := &n.Mappings[i]
|
||||
|
||||
// check that label is correct
|
||||
for k := range currentMapping.Labels {
|
||||
if !metricNameRE.MatchString(k) {
|
||||
return fmt.Errorf("invalid label key: %s", k)
|
||||
}
|
||||
}
|
||||
|
||||
if currentMapping.Name == "" {
|
||||
return fmt.Errorf("line %d: metric mapping didn't set a metric name", i)
|
||||
}
|
||||
|
||||
if !metricNameRE.MatchString(currentMapping.Name) {
|
||||
return fmt.Errorf("metric name '%s' doesn't match regex '%s'", currentMapping.Name, metricNameRE)
|
||||
}
|
||||
|
||||
if currentMapping.MatchType == "" {
|
||||
currentMapping.MatchType = n.Defaults.MatchType
|
||||
}
|
||||
|
||||
if currentMapping.MatchType == matchTypeGlob {
|
||||
if !metricLineRE.MatchString(currentMapping.Match) {
|
||||
return fmt.Errorf("invalid match: %s", currentMapping.Match)
|
||||
}
|
||||
// Translate the glob-style metric match line into a proper regex that we
|
||||
// can use to match metrics later on.
|
||||
metricRe := strings.Replace(currentMapping.Match, ".", "\\.", -1)
|
||||
metricRe = strings.Replace(metricRe, "*", "([^.]*)", -1)
|
||||
currentMapping.regex = regexp.MustCompile("^" + metricRe + "$")
|
||||
} else {
|
||||
currentMapping.regex = regexp.MustCompile(currentMapping.Match)
|
||||
}
|
||||
|
||||
if currentMapping.TimerType == "" {
|
||||
currentMapping.TimerType = n.Defaults.TimerType
|
||||
}
|
||||
|
||||
if currentMapping.Buckets == nil || len(currentMapping.Buckets) == 0 {
|
||||
currentMapping.Buckets = n.Defaults.Buckets
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
m.Defaults = n.Defaults
|
||||
m.Mappings = n.Mappings
|
||||
|
||||
mappingsCount.Set(float64(len(n.Mappings)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MetricMapper) InitFromFile(fileName string) error {
|
||||
mappingStr, err := ioutil.ReadFile(fileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return m.InitFromYAMLString(string(mappingStr))
|
||||
}
|
||||
|
||||
func (m *MetricMapper) getMapping(statsdMetric string) (*metricMapping, prometheus.Labels, bool) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
for _, mapping := range m.Mappings {
|
||||
matches := mapping.regex.FindStringSubmatchIndex(statsdMetric)
|
||||
if len(matches) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
labels := prometheus.Labels{}
|
||||
for label, valueExpr := range mapping.Labels {
|
||||
value := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches)
|
||||
labels[label] = string(value)
|
||||
}
|
||||
return &mapping, labels, true
|
||||
}
|
||||
|
||||
return nil, nil, false
|
||||
}
|
41
pkg/node/statsd/exporter/match.go
Normal file
@ -0,0 +1,41 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package exporter

import "fmt"

type matchType string

const (
	matchTypeGlob    matchType = "glob"
	matchTypeRegex   matchType = "regex"
	matchTypeDefault matchType = ""
)

func (t *matchType) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var v string
	if err := unmarshal(&v); err != nil {
		return err
	}

	switch matchType(v) {
	case matchTypeRegex:
		*t = matchTypeRegex
	case matchTypeGlob, matchTypeDefault:
		*t = matchTypeGlob
	default:
		return fmt.Errorf("invalid match type %q", v)
	}
	return nil
}
122
pkg/node/statsd/exporter/telemetry.go
Normal file
@ -0,0 +1,122 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package exporter
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
eventStats = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_events_total",
|
||||
Help: "The total number of StatsD events seen.",
|
||||
},
|
||||
[]string{"type"},
|
||||
)
|
||||
eventsUnmapped = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_events_unmapped_total",
|
||||
Help: "The total number of StatsD events no mapping was found for.",
|
||||
})
|
||||
udpPackets = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_udp_packets_total",
|
||||
Help: "The total number of StatsD packets received over UDP.",
|
||||
},
|
||||
)
|
||||
tcpConnections = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_tcp_connections_total",
|
||||
Help: "The total number of TCP connections handled.",
|
||||
},
|
||||
)
|
||||
tcpErrors = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_tcp_connection_errors_total",
|
||||
Help: "The number of errors encountered reading from TCP.",
|
||||
},
|
||||
)
|
||||
tcpLineTooLong = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_tcp_too_long_lines_total",
|
||||
Help: "The number of lines discarded due to being too long.",
|
||||
},
|
||||
)
|
||||
linesReceived = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_lines_total",
|
||||
Help: "The total number of StatsD lines received.",
|
||||
},
|
||||
)
|
||||
samplesReceived = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_samples_total",
|
||||
Help: "The total number of StatsD samples received.",
|
||||
},
|
||||
)
|
||||
sampleErrors = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_sample_errors_total",
|
||||
Help: "The total number of errors parsing StatsD samples.",
|
||||
},
|
||||
[]string{"reason"},
|
||||
)
|
||||
tagsReceived = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_tags_total",
|
||||
Help: "The total number of DogStatsD tags processed.",
|
||||
},
|
||||
)
|
||||
tagErrors = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_tag_errors_total",
|
||||
Help: "The number of errors parsign DogStatsD tags.",
|
||||
},
|
||||
)
|
||||
ConfigLoads = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_config_reloads_total",
|
||||
Help: "The number of configuration reloads.",
|
||||
},
|
||||
[]string{"outcome"},
|
||||
)
|
||||
mappingsCount = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "statsd_exporter_loaded_mappings",
|
||||
Help: "The current number of configured metric mappings.",
|
||||
})
|
||||
conflictingEventStats = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "statsd_exporter_events_conflict_total",
|
||||
Help: "The total number of StatsD events with conflicting names.",
|
||||
},
|
||||
[]string{"type"},
|
||||
)
|
||||
)
|
||||
|
||||
//MetryInit init
|
||||
func MetryInit(registry *prometheus.Registry) {
|
||||
registry.MustRegister(eventStats)
|
||||
registry.MustRegister(udpPackets)
|
||||
registry.MustRegister(tcpConnections)
|
||||
registry.MustRegister(tcpErrors)
|
||||
registry.MustRegister(tcpLineTooLong)
|
||||
registry.MustRegister(linesReceived)
|
||||
registry.MustRegister(samplesReceived)
|
||||
registry.MustRegister(sampleErrors)
|
||||
registry.MustRegister(tagsReceived)
|
||||
registry.MustRegister(tagErrors)
|
||||
registry.MustRegister(ConfigLoads)
|
||||
registry.MustRegister(mappingsCount)
|
||||
registry.MustRegister(conflictingEventStats)
|
||||
}
|
41
pkg/node/statsd/exporter/timer.go
Normal file
@ -0,0 +1,41 @@
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package exporter

import "fmt"

type timerType string

const (
	timerTypeHistogram timerType = "histogram"
	timerTypeSummary   timerType = "summary"
	timerTypeDefault   timerType = ""
)

func (t *timerType) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var v string
	if err := unmarshal(&v); err != nil {
		return err
	}

	switch timerType(v) {
	case timerTypeHistogram:
		*t = timerTypeHistogram
	case timerTypeSummary, timerTypeDefault:
		*t = timerTypeSummary
	default:
		return fmt.Errorf("invalid timer type '%s'", v)
	}
	return nil
}
222
pkg/node/statsd/statsd_export.go
Normal file
@ -0,0 +1,222 @@
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package statsd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/howeyc/fsnotify"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/version"
|
||||
|
||||
"github.com/goodrain/rainbond/cmd/node/option"
|
||||
"github.com/goodrain/rainbond/pkg/node/statsd/exporter"
|
||||
)
|
||||
|
||||
//Exporter receive statsd metric and export prometheus metric
|
||||
type Exporter struct {
|
||||
statsdListenAddress string
|
||||
statsdListenUDP string
|
||||
statsdListenTCP string
|
||||
mappingConfig string
|
||||
readBuffer int
|
||||
exporter *exporter.Exporter
|
||||
register *prometheus.Registry
|
||||
mapper *exporter.MetricMapper
|
||||
}
|
||||
|
||||
//CreateExporter create an exporter
|
||||
func CreateExporter(sc option.StatsdConfig, register *prometheus.Registry) *Exporter {
|
||||
exp := &Exporter{
|
||||
statsdListenAddress: sc.StatsdListenAddress,
|
||||
statsdListenTCP: sc.StatsdListenTCP,
|
||||
statsdListenUDP: sc.StatsdListenUDP,
|
||||
readBuffer: sc.ReadBuffer,
|
||||
mappingConfig: sc.MappingConfig,
|
||||
register: register,
|
||||
}
|
||||
exporter.MetryInit(register)
|
||||
return exp
|
||||
}
|
||||
|
||||
//Start Start
|
||||
func (e *Exporter) Start() error {
|
||||
e.register.Register(version.NewCollector("statsd_exporter"))
|
||||
if e.statsdListenAddress != "" {
|
||||
logrus.Warnln("Warning: statsd.listen-address is DEPRECATED, please use statsd.listen-udp instead.")
|
||||
e.statsdListenUDP = e.statsdListenAddress
|
||||
}
|
||||
|
||||
if e.statsdListenUDP == "" && e.statsdListenTCP == "" {
|
||||
logrus.Fatalln("At least one of UDP/TCP listeners must be specified.")
|
||||
return fmt.Errorf("At least one of UDP/TCP listeners must be specified")
|
||||
}
|
||||
|
||||
logrus.Infoln("Starting StatsD -> Prometheus Exporter", version.Info())
|
||||
logrus.Infoln("Build context", version.BuildContext())
|
||||
logrus.Infof("Accepting StatsD Traffic: UDP %v, TCP %v", e.statsdListenUDP, e.statsdListenTCP)
|
||||
|
||||
events := make(chan exporter.Events, 1024)
|
||||
|
||||
if e.statsdListenUDP != "" {
|
||||
udpListenAddr := udpAddrFromString(e.statsdListenUDP)
|
||||
uconn, err := net.ListenUDP("udp", udpListenAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.readBuffer != 0 {
|
||||
err = uconn.SetReadBuffer(e.readBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ul := &exporter.StatsDUDPListener{Conn: uconn}
|
||||
go ul.Listen(events)
|
||||
}
|
||||
|
||||
if e.statsdListenTCP != "" {
|
||||
tcpListenAddr := tcpAddrFromString(e.statsdListenTCP)
|
||||
tconn, err := net.ListenTCP("tcp", tcpListenAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tl := &exporter.StatsDTCPListener{Conn: tconn}
|
||||
go tl.Listen(events)
|
||||
}
|
||||
|
||||
mapper := &exporter.MetricMapper{}
|
||||
if e.mappingConfig != "" {
|
||||
err := mapper.InitFromFile(e.mappingConfig)
|
||||
if err != nil {
|
||||
logrus.Fatal("Error loading config:", err)
|
||||
return err
|
||||
}
|
||||
// Reloading by watching the mapping file for changes is risky; reload via the API instead.
|
||||
//go watchConfig(e.mappingConfig, mapper)
|
||||
}
|
||||
exporter := exporter.NewExporter(mapper, e.register)
|
||||
e.exporter = exporter
|
||||
e.mapper = mapper
|
||||
go exporter.Listen(events)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Describe implements the prometheus.Collector interface.
|
||||
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
|
||||
|
||||
}
|
||||
|
||||
// Collect implements the prometheus.Collector interface.
|
||||
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
|
||||
}
|
||||
|
||||
//GetRegister GetRegister
|
||||
func (e *Exporter) GetRegister() *prometheus.Registry {
|
||||
return e.register
|
||||
}
|
||||
|
||||
func ipPortFromString(addr string) (*net.IPAddr, int) {
|
||||
host, portStr, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
logrus.Fatal("Bad StatsD listening address", addr)
|
||||
}
|
||||
|
||||
if host == "" {
|
||||
host = "0.0.0.0"
|
||||
}
|
||||
ip, err := net.ResolveIPAddr("ip", host)
|
||||
if err != nil {
|
||||
logrus.Fatalf("Unable to resolve %s: %s", host, err)
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil || port < 0 || port > 65535 {
|
||||
logrus.Fatalf("Bad port %s: %s", portStr, err)
|
||||
}
|
||||
|
||||
return ip, port
|
||||
}
|
||||
|
||||
func udpAddrFromString(addr string) *net.UDPAddr {
|
||||
ip, port := ipPortFromString(addr)
|
||||
return &net.UDPAddr{
|
||||
IP: ip.IP,
|
||||
Port: port,
|
||||
Zone: ip.Zone,
|
||||
}
|
||||
}
|
||||
|
||||
func tcpAddrFromString(addr string) *net.TCPAddr {
|
||||
ip, port := ipPortFromString(addr)
|
||||
return &net.TCPAddr{
|
||||
IP: ip.IP,
|
||||
Port: port,
|
||||
Zone: ip.Zone,
|
||||
}
|
||||
}
|
||||
|
||||
func watchConfig(fileName string, mapper *exporter.MetricMapper) {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
err = watcher.WatchFlags(fileName, fsnotify.FSN_MODIFY)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case ev := <-watcher.Event:
|
||||
logrus.Infof("Config file changed (%s), attempting reload", ev)
|
||||
err = mapper.InitFromFile(fileName)
|
||||
if err != nil {
|
||||
logrus.Errorln("Error reloading config:", err)
|
||||
exporter.ConfigLoads.WithLabelValues("failure").Inc()
|
||||
} else {
|
||||
logrus.Infoln("Config reloaded successfully")
|
||||
exporter.ConfigLoads.WithLabelValues("success").Inc()
|
||||
}
|
||||
// Re-add the file watcher since it can get lost on some changes. E.g.
|
||||
// saving a file with vim results in a RENAME-MODIFY-DELETE event
|
||||
// sequence, after which the newly written file is no longer watched.
|
||||
err = watcher.WatchFlags(fileName, fsnotify.FSN_MODIFY)
|
||||
case err := <-watcher.Error:
|
||||
logrus.Errorln("Error watching config:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//ReloadConfig reload mapper config file
|
||||
func (e *Exporter) ReloadConfig() (err error) {
|
||||
logrus.Infof("Config file changed, attempting reload")
|
||||
err = e.mapper.InitFromFile(e.mappingConfig)
|
||||
if err != nil {
|
||||
logrus.Errorln("Error reloading config:", err)
|
||||
exporter.ConfigLoads.WithLabelValues("failure").Inc()
|
||||
} else {
|
||||
logrus.Infoln("Config reloaded successfully")
|
||||
exporter.ConfigLoads.WithLabelValues("success").Inc()
|
||||
}
|
||||
return
|
||||
}
|
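A minimal sketch of wiring this statsd Exporter into a node process; the listen address, mapping file path, and /metrics port are assumptions for illustration, not values taken from this diff:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/goodrain/rainbond/cmd/node/option"
	"github.com/goodrain/rainbond/pkg/node/statsd"
)

func main() {
	registry := prometheus.NewRegistry()
	// Assumed configuration values; in practice they come from the
	// statsd.* command line flags.
	sc := option.StatsdConfig{
		StatsdListenUDP: ":9125",
		MappingConfig:   "test/mapper.yml",
	}
	exp := statsd.CreateExporter(sc, registry)
	if err := exp.Start(); err != nil {
		panic(err)
	}
	// Expose the metrics translated from StatsD on /metrics.
	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	http.ListenAndServe(":9102", nil)
}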
@ -1,33 +1,33 @@
|
||||
|
||||
// RAINBOND, Application Management Platform
|
||||
// Copyright (C) 2014-2017 Goodrain Co., Ltd.
|
||||
|
||||
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version. For any non-GPL usage of Rainbond,
|
||||
// one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
|
||||
// must be obtained first.
|
||||
|
||||
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
"github.com/goodrain/rainbond/cmd/worker/option"
|
||||
"github.com/goodrain/rainbond/pkg/db"
|
||||
"github.com/goodrain/rainbond/pkg/db/model"
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/goodrain/rainbond/cmd/worker/option"
|
||||
"github.com/goodrain/rainbond/pkg/db"
|
||||
"github.com/goodrain/rainbond/pkg/db/model"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
|
||||
"k8s.io/client-go/kubernetes"
|
||||
@ -196,7 +196,7 @@ func (s *statusManager) checkStatus() {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if d.Status.ReadyReplicas >= d.Status.Replicas {
|
||||
if d.Status.ReadyReplicas >= d.Status.Replicas && d.Status.Replicas != 0 {
|
||||
s.SetStatus(serviceID, RUNNING)
|
||||
break
|
||||
} else {
|
||||
@ -219,7 +219,7 @@ func (s *statusManager) checkStatus() {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if d.Status.ReadyReplicas >= d.Status.Replicas {
|
||||
if d.Status.ReadyReplicas >= d.Status.Replicas && d.Status.Replicas != 0 {
|
||||
s.SetStatus(serviceID, RUNNING)
|
||||
break
|
||||
} else {
|
||||
@ -243,7 +243,7 @@ func (s *statusManager) checkStatus() {
|
||||
} else {
|
||||
readycount := s.getReadyCount(d.Namespace,
|
||||
d.Labels["name"], d.Labels["version"])
|
||||
if readycount >= d.Status.Replicas {
|
||||
if readycount >= d.Status.Replicas && d.Status.Replicas != 0 {
|
||||
s.SetStatus(serviceID, RUNNING)
|
||||
break
|
||||
} else {
|
||||
@ -265,7 +265,7 @@ func (s *statusManager) checkStatus() {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if d.Status.ReadyReplicas >= d.Status.Replicas {
|
||||
if d.Status.ReadyReplicas >= d.Status.Replicas && d.Status.Replicas != 0 {
|
||||
s.SetStatus(serviceID, RUNNING)
|
||||
break
|
||||
}
|
||||
|
@ -26,7 +26,9 @@ import (

// WaitEvents waits on a key until it observes the given events and returns the final one.
func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
	wc := c.Watch(context.Background(), key, clientv3.WithRev(rev))
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wc := c.Watch(ctx, key, clientv3.WithRev(rev))
	if wc == nil {
		return nil, ErrNoWatcher
	}
@ -35,7 +37,9 @@ func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_Ev

//WatchPrefixEvents watch prefix
func WatchPrefixEvents(c *clientv3.Client, prefix string, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
	wc := c.Watch(context.Background(), prefix, clientv3.WithPrefix())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wc := c.Watch(ctx, prefix, clientv3.WithPrefix())
	if wc == nil {
		return nil, ErrNoWatcher
	}
@ -44,7 +48,9 @@ func WaitPrefixEvents(c *clientv3.Client, prefix string, evs []mvccpb.Event_Eve

//WaitPrefixEvents WaitPrefixEvents
func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
	wc := c.Watch(context.Background(), prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
	if wc == nil {
		return nil, ErrNoWatcher
	}
|
@ -90,6 +90,8 @@ func (m *manager) StartReplicationController(serviceID string, logger event.Logg
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
//CheckVersionInfo CheckVersionInfo
|
||||
func CheckVersionInfo(version *model.VersionInfo) bool {
|
||||
if !strings.Contains(strings.ToLower(version.FinalStatus),"success") {
|
||||
return false
|
||||
|
@ -105,12 +105,12 @@ func (m *manager) updateService(serviceID, tenantID string, service *v1.Service,
|
||||
ReplicationType: ReplicationType,
|
||||
K8sServiceID: service.Name,
|
||||
}
|
||||
if len(service.Spec.Ports) > 0 {
|
||||
if len(service.Spec.Ports) == 1 {
|
||||
k8sService.ContainerPort = int(service.Spec.Ports[0].Port)
|
||||
} else { // stateful service: this Service is only used for service discovery, so no port is stored
|
||||
|
||||
}
|
||||
if len(service.Spec.Ports) == 1 {
|
||||
k8sService.ContainerPort = int(service.Spec.Ports[0].Port)
|
||||
}
|
||||
// stateful services do not store a port, to avoid a storage failure
|
||||
if service.Labels["service_type"] == "stateful" {
|
||||
k8sService.ContainerPort = 0
|
||||
}
|
||||
if strings.HasSuffix(service.Name, "out") {
|
||||
k8sService.IsOut = true
|
||||
@ -154,6 +154,10 @@ func (m *manager) createService(serviceID, tenantID string, service *v1.Service,
|
||||
if len(service.Spec.Ports) > 0 {
|
||||
k8sService.ContainerPort = int(service.Spec.Ports[0].TargetPort.IntVal)
|
||||
}
|
||||
// stateful services do not store a port, to avoid a storage failure
|
||||
if service.Labels["service_type"] == "stateful" {
|
||||
k8sService.ContainerPort = 0
|
||||
}
|
||||
err = m.dbmanager.K8sServiceDao().AddModel(k8sService)
|
||||
if err != nil {
|
||||
logger.Error(fmt.Sprintf("存储K8sService(%s)信息到数据库失败", service.Name), map[string]string{"step": "worker-appm", "status": "failure"})
|
||||
|
@ -431,6 +431,31 @@ func (p *PodTemplateSpecBuild) createAdapterResources(memory int, cpu int) v1.Re
|
||||
}
|
||||
}
|
||||
|
||||
//createPluginResources
|
||||
//memory Mb
|
||||
//cpu (core*1000)
|
||||
//TODO: plugin resource limits; CPU is not limited for now
|
||||
func (p *PodTemplateSpecBuild) createPluginResources(memory int, cpu int) v1.ResourceRequirements {
|
||||
limits := v1.ResourceList{}
|
||||
// limits[v1.ResourceCPU] = *resource.NewMilliQuantity(
|
||||
// int64(cpu*3),
|
||||
// resource.DecimalSI)
|
||||
limits[v1.ResourceMemory] = *resource.NewQuantity(
|
||||
int64(memory*1024*1024),
|
||||
resource.BinarySI)
|
||||
request := v1.ResourceList{}
|
||||
// request[v1.ResourceCPU] = *resource.NewMilliQuantity(
|
||||
// int64(cpu*2),
|
||||
// resource.DecimalSI)
|
||||
request[v1.ResourceMemory] = *resource.NewQuantity(
|
||||
int64(memory*1024*1024),
|
||||
resource.BinarySI)
|
||||
return v1.ResourceRequirements{
|
||||
Limits: limits,
|
||||
Requests: request,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PodTemplateSpecBuild) createResources() v1.ResourceRequirements {
|
||||
var cpuRequest, cpuLimit int64
|
||||
memory := p.service.ContainerMemory
|
||||
@ -768,7 +793,7 @@ func (p *PodTemplateSpecBuild) createPluginsContainer(mainEnvs *[]v1.EnvVar) ([]
|
||||
Name: pluginR.PluginID,
|
||||
Image: versionInfo.BuildLocalImage,
|
||||
Env: *envs,
|
||||
Resources: p.createAdapterResources(versionInfo.ContainerMemory, versionInfo.ContainerCPU),
|
||||
Resources: p.createPluginResources(versionInfo.ContainerMemory, versionInfo.ContainerCPU),
|
||||
TerminationMessagePath: "",
|
||||
Args: args,
|
||||
}
|
||||
|
31
test/mapper.yml
Normal file
@ -0,0 +1,31 @@
#
mappings:
- match: "*.*.*.request.*"
  name: "app_request"
  labels:
    service_id: "$1"
    port: "$2"
    protocol: $3
    method: "$4"
- match: "*.*.*.request.unusual.*"
  name: "app_request_unusual"
  labels:
    service_id: "$1"
    port: "$2"
    protocol: $3
    code: "$4"

- match: "*.*.*.requesttime.*"
  name: "app_requesttime"
  labels:
    service_id: "$1"
    port: "$2"
    protocol: $3
    mode: "$4"

- match: "*.*.*.requestclient"
  name: "app_requestclient"
  labels:
    service_id: "$1"
    port: "$2"
    protocol: $3
222
vendor/github.com/coreos/etcd/clientv3.old/auth.go
generated
vendored
Normal file
@ -0,0 +1,222 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
AuthEnableResponse pb.AuthEnableResponse
|
||||
AuthDisableResponse pb.AuthDisableResponse
|
||||
AuthenticateResponse pb.AuthenticateResponse
|
||||
AuthUserAddResponse pb.AuthUserAddResponse
|
||||
AuthUserDeleteResponse pb.AuthUserDeleteResponse
|
||||
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
|
||||
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
|
||||
AuthUserGetResponse pb.AuthUserGetResponse
|
||||
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
|
||||
AuthRoleAddResponse pb.AuthRoleAddResponse
|
||||
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
|
||||
AuthRoleGetResponse pb.AuthRoleGetResponse
|
||||
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
|
||||
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
|
||||
AuthUserListResponse pb.AuthUserListResponse
|
||||
AuthRoleListResponse pb.AuthRoleListResponse
|
||||
|
||||
PermissionType authpb.Permission_Type
|
||||
Permission authpb.Permission
|
||||
)
|
||||
|
||||
const (
|
||||
PermRead = authpb.READ
|
||||
PermWrite = authpb.WRITE
|
||||
PermReadWrite = authpb.READWRITE
|
||||
)
|
||||
|
||||
type Auth interface {
|
||||
// AuthEnable enables auth of an etcd cluster.
|
||||
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
|
||||
|
||||
// AuthDisable disables auth of an etcd cluster.
|
||||
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
|
||||
|
||||
// UserAdd adds a new user to an etcd cluster.
|
||||
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
|
||||
|
||||
// UserDelete deletes a user from an etcd cluster.
|
||||
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
|
||||
|
||||
// UserChangePassword changes a password of a user.
|
||||
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
|
||||
|
||||
// UserGrantRole grants a role to a user.
|
||||
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
|
||||
|
||||
// UserGet gets a detailed information of a user.
|
||||
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
|
||||
|
||||
// UserList gets a list of all users.
|
||||
UserList(ctx context.Context) (*AuthUserListResponse, error)
|
||||
|
||||
// UserRevokeRole revokes a role of a user.
|
||||
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
|
||||
|
||||
// RoleAdd adds a new role to an etcd cluster.
|
||||
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
|
||||
|
||||
// RoleGrantPermission grants a permission to a role.
|
||||
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
|
||||
|
||||
// RoleGet gets a detailed information of a role.
|
||||
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
|
||||
|
||||
// RoleList gets a list of all roles.
|
||||
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
|
||||
|
||||
// RoleRevokePermission revokes a permission from a role.
|
||||
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
|
||||
|
||||
// RoleDelete deletes a role.
|
||||
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
|
||||
}
|
||||
|
||||
type auth struct {
|
||||
remote pb.AuthClient
|
||||
}
|
||||
|
||||
func NewAuth(c *Client) Auth {
|
||||
return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
|
||||
}
|
||||
|
||||
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
|
||||
return (*AuthEnableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
|
||||
return (*AuthDisableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
|
||||
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
|
||||
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
|
||||
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
|
||||
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
|
||||
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
|
||||
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
|
||||
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
|
||||
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
|
||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
|
||||
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
|
||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
|
||||
return (*AuthUserListResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
|
||||
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
|
||||
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
|
||||
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
|
||||
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
|
||||
perm := &authpb.Permission{
|
||||
Key: []byte(key),
|
||||
RangeEnd: []byte(rangeEnd),
|
||||
PermType: authpb.Permission_Type(permType),
|
||||
}
|
||||
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
|
||||
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
|
||||
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
|
||||
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
|
||||
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
|
||||
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
|
||||
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
|
||||
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
|
||||
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
|
||||
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func StrToPermissionType(s string) (PermissionType, error) {
|
||||
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
|
||||
if ok {
|
||||
return PermissionType(val), nil
|
||||
}
|
||||
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
|
||||
}
|
||||
|
||||
type authenticator struct {
|
||||
conn *grpc.ClientConn // conn in-use
|
||||
remote pb.AuthClient
|
||||
}
|
||||
|
||||
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
|
||||
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
|
||||
return (*AuthenticateResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *authenticator) close() {
|
||||
auth.conn.Close()
|
||||
}
|
||||
|
||||
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &authenticator{
|
||||
conn: conn,
|
||||
remote: pb.NewAuthClient(conn),
|
||||
}, nil
|
||||
}
|
526
vendor/github.com/coreos/etcd/clientv3.old/client.go
generated
vendored
Normal file
@ -0,0 +1,526 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
|
||||
ErrOldCluster = errors.New("etcdclient: old cluster version")
|
||||
)
|
||||
|
||||
// Client provides and manages an etcd v3 client session.
|
||||
type Client struct {
|
||||
Cluster
|
||||
KV
|
||||
Lease
|
||||
Watcher
|
||||
Auth
|
||||
Maintenance
|
||||
|
||||
conn *grpc.ClientConn
|
||||
dialerrc chan error
|
||||
|
||||
cfg Config
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *simpleBalancer
|
||||
retryWrapper retryRpcFunc
|
||||
retryAuthWrapper retryRpcFunc
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// Username is a username for authentication
|
||||
Username string
|
||||
// Password is a password for authentication
|
||||
Password string
|
||||
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||
tokenCred *authTokenCredential
|
||||
}
|
||||
|
||||
// New creates a new etcdv3 client from a given configuration.
|
||||
func New(cfg Config) (*Client, error) {
|
||||
if len(cfg.Endpoints) == 0 {
|
||||
return nil, ErrNoAvailableEndpoints
|
||||
}
|
||||
|
||||
return newClient(&cfg)
|
||||
}
|
||||
|
||||
// NewCtxClient creates a client with a context but no underlying grpc
|
||||
// connection. This is useful for embedded cases that override the
|
||||
// service interface implementations and do not need connection management.
|
||||
func NewCtxClient(ctx context.Context) *Client {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
return &Client{ctx: cctx, cancel: cancel}
|
||||
}
|
||||
|
||||
// NewFromURL creates a new etcdv3 client from a URL.
|
||||
func NewFromURL(url string) (*Client, error) {
|
||||
return New(Config{Endpoints: []string{url}})
|
||||
}
|
||||
|
||||
// Close shuts down the client's etcd connections.
|
||||
func (c *Client) Close() error {
|
||||
c.cancel()
|
||||
c.Watcher.Close()
|
||||
c.Lease.Close()
|
||||
if c.conn != nil {
|
||||
return toErr(c.ctx, c.conn.Close())
|
||||
}
|
||||
return c.ctx.Err()
|
||||
}
|
||||
|
||||
// Ctx is a context for "out of band" messages (e.g., for sending
|
||||
// "clean up" message when another context is canceled). It is
|
||||
// canceled on client Close().
|
||||
func (c *Client) Ctx() context.Context { return c.ctx }
|
||||
|
||||
// Endpoints lists the registered endpoints for the client.
|
||||
func (c *Client) Endpoints() (eps []string) {
|
||||
// copy the slice; protect original endpoints from being changed
|
||||
eps = make([]string, len(c.cfg.Endpoints))
|
||||
copy(eps, c.cfg.Endpoints)
|
||||
return
|
||||
}
|
||||
|
||||
// SetEndpoints updates client's endpoints.
|
||||
func (c *Client) SetEndpoints(eps ...string) {
|
||||
c.cfg.Endpoints = eps
|
||||
c.balancer.updateAddrs(eps)
|
||||
}
|
||||
|
||||
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||
func (c *Client) Sync(ctx context.Context) error {
|
||||
mresp, err := c.MemberList(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var eps []string
|
||||
for _, m := range mresp.Members {
|
||||
eps = append(eps, m.ClientURLs...)
|
||||
}
|
||||
c.SetEndpoints(eps...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) autoSync() {
|
||||
if c.cfg.AutoSyncInterval == time.Duration(0) {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
case <-time.After(c.cfg.AutoSyncInterval):
|
||||
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
|
||||
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
|
||||
logger.Println("Auto sync endpoints failed:", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type authTokenCredential struct {
|
||||
token string
|
||||
tokenMu *sync.RWMutex
|
||||
}
|
||||
|
||||
func (cred authTokenCredential) RequireTransportSecurity() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
|
||||
cred.tokenMu.RLock()
|
||||
defer cred.tokenMu.RUnlock()
|
||||
return map[string]string{
|
||||
"token": cred.token,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
||||
proto = "tcp"
|
||||
host = endpoint
|
||||
url, uerr := url.Parse(endpoint)
|
||||
if uerr != nil || !strings.Contains(endpoint, "://") {
|
||||
return
|
||||
}
|
||||
scheme = url.Scheme
|
||||
|
||||
// strip scheme:// prefix since grpc dials by host
|
||||
host = url.Host
|
||||
switch url.Scheme {
|
||||
case "http", "https":
|
||||
case "unix", "unixs":
|
||||
proto = "unix"
|
||||
host = url.Host + url.Path
|
||||
default:
|
||||
proto, host = "", ""
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
|
||||
creds = c.creds
|
||||
switch scheme {
|
||||
case "unix":
|
||||
case "http":
|
||||
creds = nil
|
||||
case "https", "unixs":
|
||||
if creds != nil {
|
||||
break
|
||||
}
|
||||
tlsconfig := &tls.Config{}
|
||||
emptyCreds := credentials.NewTLS(tlsconfig)
|
||||
creds = &emptyCreds
|
||||
default:
|
||||
creds = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// dialSetupOpts gives the dial opts prior to any authentication
|
||||
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
|
||||
}
|
||||
if c.cfg.DialKeepAliveTime > 0 {
|
||||
params := keepalive.ClientParameters{
|
||||
Time: c.cfg.DialKeepAliveTime,
|
||||
}
|
||||
// Only relevant when KeepAliveTime is non-zero
|
||||
if c.cfg.DialKeepAliveTimeout > 0 {
|
||||
params.Timeout = c.cfg.DialKeepAliveTimeout
|
||||
}
|
||||
opts = append(opts, grpc.WithKeepaliveParams(params))
|
||||
}
|
||||
opts = append(opts, dopts...)
|
||||
|
||||
f := func(host string, t time.Duration) (net.Conn, error) {
|
||||
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
|
||||
if host == "" && endpoint != "" {
|
||||
// dialing an endpoint not in the balancer; use
|
||||
// endpoint passed into dial
|
||||
proto, host, _ = parseEndpoint(endpoint)
|
||||
}
|
||||
if proto == "" {
|
||||
return nil, fmt.Errorf("unknown scheme for %q", host)
|
||||
}
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return nil, c.ctx.Err()
|
||||
default:
|
||||
}
|
||||
dialer := &net.Dialer{Timeout: t}
|
||||
conn, err := dialer.DialContext(c.ctx, proto, host)
|
||||
if err != nil {
|
||||
select {
|
||||
case c.dialerrc <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
return conn, err
|
||||
}
|
||||
opts = append(opts, grpc.WithDialer(f))
|
||||
|
||||
creds := c.creds
|
||||
if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
|
||||
creds = c.processCreds(scheme)
|
||||
}
|
||||
if creds != nil {
|
||||
opts = append(opts, grpc.WithTransportCredentials(*creds))
|
||||
} else {
|
||||
opts = append(opts, grpc.WithInsecure())
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// Dial connects to a single endpoint using the client's config.
|
||||
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
|
||||
return c.dial(endpoint)
|
||||
}
|
||||
|
||||
func (c *Client) getToken(ctx context.Context) error {
|
||||
var err error // return last error in a case of fail
|
||||
var auth *authenticator
|
||||
|
||||
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||
endpoint := c.cfg.Endpoints[i]
|
||||
host := getHost(endpoint)
|
||||
// use dial options without dopts to avoid reusing the client balancer
|
||||
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer auth.close()
|
||||
|
||||
var resp *AuthenticateResponse
|
||||
resp, err = auth.authenticate(ctx, c.Username, c.Password)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
c.tokenCred.tokenMu.Lock()
|
||||
c.tokenCred.token = resp.Token
|
||||
c.tokenCred.tokenMu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
opts := c.dialSetupOpts(endpoint, dopts...)
|
||||
host := getHost(endpoint)
|
||||
if c.Username != "" && c.Password != "" {
|
||||
c.tokenCred = &authTokenCredential{
|
||||
tokenMu: &sync.RWMutex{},
|
||||
}
|
||||
|
||||
ctx := c.ctx
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||
defer cancel()
|
||||
ctx = cctx
|
||||
}
|
||||
|
||||
err := c.getToken(ctx)
|
||||
if err != nil {
|
||||
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
|
||||
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
|
||||
err = grpc.ErrClientConnTimeout
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
|
||||
}
|
||||
}
|
||||
|
||||
opts = append(opts, c.cfg.DialOptions...)
|
||||
|
||||
conn, err := grpc.DialContext(c.ctx, host, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// WithRequireLeader requires client requests to only succeed
|
||||
// when the cluster has a leader.
|
||||
func WithRequireLeader(ctx context.Context) context.Context {
|
||||
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
|
||||
return metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
func newClient(cfg *Config) (*Client, error) {
|
||||
if cfg == nil {
|
||||
cfg = &Config{}
|
||||
}
|
||||
var creds *credentials.TransportCredentials
|
||||
if cfg.TLS != nil {
|
||||
c := credentials.NewTLS(cfg.TLS)
|
||||
creds = &c
|
||||
}
|
||||
|
||||
// use a temporary skeleton client to bootstrap first connection
|
||||
baseCtx := context.TODO()
|
||||
if cfg.Context != nil {
|
||||
baseCtx = cfg.Context
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(baseCtx)
|
||||
client := &Client{
|
||||
conn: nil,
|
||||
dialerrc: make(chan error, 1),
|
||||
cfg: *cfg,
|
||||
creds: creds,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
if cfg.Username != "" && cfg.Password != "" {
|
||||
client.Username = cfg.Username
|
||||
client.Password = cfg.Password
|
||||
}
|
||||
|
||||
client.balancer = newSimpleBalancer(cfg.Endpoints)
|
||||
// use Endpoints[0] so that for https:// without any tls config given, then
|
||||
// grpc will assume the ServerName is in the endpoint.
|
||||
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
|
||||
if err != nil {
|
||||
client.cancel()
|
||||
client.balancer.Close()
|
||||
return nil, err
|
||||
}
|
||||
client.conn = conn
|
||||
client.retryWrapper = client.newRetryWrapper()
|
||||
client.retryAuthWrapper = client.newAuthRetryWrapper()
|
||||
|
||||
// wait for a connection
|
||||
if cfg.DialTimeout > 0 {
|
||||
hasConn := false
|
||||
waitc := time.After(cfg.DialTimeout)
|
||||
select {
|
||||
case <-client.balancer.readyc:
|
||||
hasConn = true
|
||||
case <-ctx.Done():
|
||||
case <-waitc:
|
||||
}
|
||||
if !hasConn {
|
||||
err := grpc.ErrClientConnTimeout
|
||||
select {
|
||||
case err = <-client.dialerrc:
|
||||
default:
|
||||
}
|
||||
client.cancel()
|
||||
client.balancer.Close()
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
client.Cluster = NewCluster(client)
|
||||
client.KV = NewKV(client)
|
||||
client.Lease = NewLease(client)
|
||||
client.Watcher = NewWatcher(client)
|
||||
client.Auth = NewAuth(client)
|
||||
client.Maintenance = NewMaintenance(client)
|
||||
|
||||
if cfg.RejectOldCluster {
|
||||
if err := client.checkVersion(); err != nil {
|
||||
client.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
go client.autoSync()
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (c *Client) checkVersion() (err error) {
|
||||
var wg sync.WaitGroup
|
||||
errc := make(chan error, len(c.cfg.Endpoints))
|
||||
ctx, cancel := context.WithCancel(c.ctx)
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||
}
|
||||
wg.Add(len(c.cfg.Endpoints))
|
||||
for _, ep := range c.cfg.Endpoints {
|
||||
// if cluster is current, any endpoint gives a recent version
|
||||
go func(e string) {
|
||||
defer wg.Done()
|
||||
resp, rerr := c.Status(ctx, e)
|
||||
if rerr != nil {
|
||||
errc <- rerr
|
||||
return
|
||||
}
|
||||
vs := strings.Split(resp.Version, ".")
|
||||
maj, min := 0, 0
|
||||
if len(vs) >= 2 {
|
||||
maj, rerr = strconv.Atoi(vs[0])
|
||||
min, rerr = strconv.Atoi(vs[1])
|
||||
}
|
||||
if maj < 3 || (maj == 3 && min < 2) {
|
||||
rerr = ErrOldCluster
|
||||
}
|
||||
errc <- rerr
|
||||
}(ep)
|
||||
}
|
||||
// wait for success
|
||||
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||
if err = <-errc; err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
cancel()
|
||||
wg.Wait()
|
||||
return err
|
||||
}
|
||||
|
||||
// ActiveConnection returns the current in-use connection
|
||||
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
|
||||
|
||||
// isHaltErr returns true if the given error and context indicate no forward
|
||||
// progress can be made, even after reconnecting.
|
||||
func isHaltErr(ctx context.Context, err error) bool {
|
||||
if ctx != nil && ctx.Err() != nil {
|
||||
return true
|
||||
}
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
code := grpc.Code(err)
|
||||
// Unavailable codes mean the system will be right back.
|
||||
// (e.g., can't connect, lost leader)
|
||||
// Treat Internal codes as if something failed, leaving the
|
||||
// system in an inconsistent state, but retrying could make progress.
|
||||
// (e.g., failed in middle of send, corrupted frame)
|
||||
// TODO: are permanent Internal errors possible from grpc?
|
||||
return code != codes.Unavailable && code != codes.Internal
|
||||
}
|
||||
|
||||
func toErr(ctx context.Context, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
err = rpctypes.Error(err)
|
||||
if _, ok := err.(rpctypes.EtcdError); ok {
|
||||
return err
|
||||
}
|
||||
code := grpc.Code(err)
|
||||
switch code {
|
||||
case codes.DeadlineExceeded:
|
||||
fallthrough
|
||||
case codes.Canceled:
|
||||
if ctx.Err() != nil {
|
||||
err = ctx.Err()
|
||||
}
|
||||
case codes.Unavailable:
|
||||
err = ErrNoAvailableEndpoints
|
||||
case codes.FailedPrecondition:
|
||||
err = grpc.ErrClientConnClosing
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func canceledByCaller(stopCtx context.Context, err error) bool {
|
||||
if stopCtx.Err() == nil || err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return err == context.Canceled || err == context.DeadlineExceeded
|
||||
}
|
154
vendor/github.com/coreos/etcd/clientv3.old/client_test.go
generated
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestDialCancel(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
// accept first connection so client is created with dial timeout
|
||||
ln, err := net.Listen("unix", "dialcancel:12345")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer ln.Close()
|
||||
|
||||
ep := "unix://dialcancel:12345"
|
||||
cfg := Config{
|
||||
Endpoints: []string{ep},
|
||||
DialTimeout: 30 * time.Second}
|
||||
c, err := New(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// connect to ipv4 blackhole so dial blocks
|
||||
c.SetEndpoints("http://254.0.0.1:12345")
|
||||
|
||||
// issue Get to force redial attempts
|
||||
getc := make(chan struct{})
|
||||
go func() {
|
||||
defer close(getc)
|
||||
// Get may hang forever on grpc's Stream.Header() if its
|
||||
// context is never canceled.
|
||||
c.Get(c.Ctx(), "abc")
|
||||
}()
|
||||
|
||||
// wait a little bit so client close is after dial starts
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
c.Close()
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("failed to close")
|
||||
case <-donec:
|
||||
}
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("get failed to exit")
|
||||
case <-getc:
|
||||
}
|
||||
}
|
||||
|
||||
func TestDialTimeout(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
testCfgs := []Config{
|
||||
{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: 2 * time.Second,
|
||||
},
|
||||
{
|
||||
Endpoints: []string{"http://254.0.0.1:12345"},
|
||||
DialTimeout: time.Second,
|
||||
Username: "abc",
|
||||
Password: "def",
|
||||
},
|
||||
}
|
||||
|
||||
for i, cfg := range testCfgs {
|
||||
donec := make(chan error)
|
||||
go func() {
|
||||
// without timeout, dial continues forever on ipv4 blackhole
|
||||
c, err := New(cfg)
|
||||
if c != nil || err == nil {
|
||||
t.Errorf("#%d: new client should fail", i)
|
||||
}
|
||||
donec <- err
|
||||
}()
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
select {
|
||||
case err := <-donec:
|
||||
t.Errorf("#%d: dial didn't wait (%v)", i, err)
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("#%d: failed to timeout dial on time", i)
|
||||
case err := <-donec:
|
||||
if err != grpc.ErrClientConnTimeout {
|
||||
t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDialNoTimeout(t *testing.T) {
|
||||
cfg := Config{Endpoints: []string{"127.0.0.1:12345"}}
|
||||
c, err := New(cfg)
|
||||
if c == nil || err != nil {
|
||||
t.Fatalf("new client with DialNoWait should succeed, got %v", err)
|
||||
}
|
||||
c.Close()
|
||||
}
|
||||
|
||||
func TestIsHaltErr(t *testing.T) {
|
||||
if !isHaltErr(nil, fmt.Errorf("etcdserver: some etcdserver error")) {
|
||||
t.Errorf(`error prefixed with "etcdserver: " should be Halted by default`)
|
||||
}
|
||||
if isHaltErr(nil, rpctypes.ErrGRPCStopped) {
|
||||
t.Errorf("error %v should not halt", rpctypes.ErrGRPCStopped)
|
||||
}
|
||||
if isHaltErr(nil, rpctypes.ErrGRPCNoLeader) {
|
||||
t.Errorf("error %v should not halt", rpctypes.ErrGRPCNoLeader)
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
if isHaltErr(ctx, nil) {
|
||||
t.Errorf("no error and active context should not be Halted")
|
||||
}
|
||||
cancel()
|
||||
if !isHaltErr(ctx, nil) {
|
||||
t.Errorf("cancel on context should be Halted")
|
||||
}
|
||||
}
|
66
vendor/github.com/coreos/etcd/clientv3.old/clientv3util/example_key_test.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3util_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/clientv3/clientv3util"
|
||||
)
|
||||
|
||||
func ExampleKeyExists_put() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"127.0.0.1:2379"},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
kvc := clientv3.NewKV(cli)
|
||||
|
||||
// perform a put only if key is missing
|
||||
// It is useful to do the check (transactionally) to avoid overwriting
|
||||
// the existing key which would generate potentially unwanted events,
|
||||
// unless of course you wanted to do an overwrite no matter what.
|
||||
_, err = kvc.Txn(context.Background()).
|
||||
If(clientv3util.KeyMissing("purpleidea")).
|
||||
Then(clientv3.OpPut("purpleidea", "hello world")).
|
||||
Commit()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleKeyExists_delete() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"127.0.0.1:2379"},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
kvc := clientv3.NewKV(cli)
|
||||
|
||||
// perform a delete only if key already exists
|
||||
_, err = kvc.Txn(context.Background()).
|
||||
If(clientv3util.KeyExists("purpleidea")).
|
||||
Then(clientv3.OpDelete("purpleidea")).
|
||||
Commit()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
33
vendor/github.com/coreos/etcd/clientv3.old/clientv3util/util.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package clientv3util contains utility functions derived from clientv3.
|
||||
package clientv3util
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
)
|
||||
|
||||
// KeyExists returns a comparison operation that evaluates to true iff the given
|
||||
// key exists. It does this by checking if the key `Version` is greater than 0.
|
||||
// It is a useful guard in transaction delete operations.
|
||||
func KeyExists(key string) clientv3.Cmp {
|
||||
return clientv3.Compare(clientv3.Version(key), ">", 0)
|
||||
}
|
||||
|
||||
// KeyMissing returns a comparison operation that evaluates to true iff the
|
||||
// given key does not exist.
|
||||
func KeyMissing(key string) clientv3.Cmp {
|
||||
return clientv3.Compare(clientv3.Version(key), "=", 0)
|
||||
}
|
100
vendor/github.com/coreos/etcd/clientv3.old/cluster.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type (
|
||||
Member pb.Member
|
||||
MemberListResponse pb.MemberListResponse
|
||||
MemberAddResponse pb.MemberAddResponse
|
||||
MemberRemoveResponse pb.MemberRemoveResponse
|
||||
MemberUpdateResponse pb.MemberUpdateResponse
|
||||
)
|
||||
|
||||
type Cluster interface {
|
||||
// MemberList lists the current cluster membership.
|
||||
MemberList(ctx context.Context) (*MemberListResponse, error)
|
||||
|
||||
// MemberAdd adds a new member into the cluster.
|
||||
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
|
||||
|
||||
// MemberRemove removes an existing member from the cluster.
|
||||
MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
|
||||
|
||||
// MemberUpdate updates the peer addresses of the member.
|
||||
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
|
||||
}
|
||||
|
||||
type cluster struct {
|
||||
remote pb.ClusterClient
|
||||
}
|
||||
|
||||
func NewCluster(c *Client) Cluster {
|
||||
return &cluster{remote: RetryClusterClient(c)}
|
||||
}
|
||||
|
||||
func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
|
||||
return &cluster{remote: remote}
|
||||
}
|
||||
|
||||
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
|
||||
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberAdd(ctx, r)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return (*MemberAddResponse)(resp), nil
|
||||
}
|
||||
|
||||
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
|
||||
r := &pb.MemberRemoveRequest{ID: id}
|
||||
resp, err := c.remote.MemberRemove(ctx, r)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
return (*MemberRemoveResponse)(resp), nil
|
||||
}
|
||||
|
||||
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
|
||||
// it is safe to retry on update.
|
||||
for {
|
||||
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*MemberUpdateResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
|
||||
// it is safe to retry on list.
|
||||
for {
|
||||
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*MemberListResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
}
|
||||
}
|
53
vendor/github.com/coreos/etcd/clientv3.old/compact_op.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
)
|
||||
|
||||
// CompactOp represents a compact operation.
|
||||
type CompactOp struct {
|
||||
revision int64
|
||||
physical bool
|
||||
}
|
||||
|
||||
// CompactOption configures compact operation.
|
||||
type CompactOption func(*CompactOp)
|
||||
|
||||
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
|
||||
for _, opt := range opts {
|
||||
opt(op)
|
||||
}
|
||||
}
|
||||
|
||||
// OpCompact wraps slice CompactOption to create a CompactOp.
|
||||
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
|
||||
ret := CompactOp{revision: rev}
|
||||
ret.applyCompactOpts(opts)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (op CompactOp) toRequest() *pb.CompactionRequest {
|
||||
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
|
||||
}
|
||||
|
||||
// WithCompactPhysical makes compact RPC call wait until
|
||||
// the compaction is physically applied to the local database
|
||||
// such that compacted entries are totally removed from the
|
||||
// backend database.
|
||||
func WithCompactPhysical() CompactOption {
|
||||
return func(op *CompactOp) { op.physical = true }
|
||||
}
|
30
vendor/github.com/coreos/etcd/clientv3.old/compact_op_test.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
)
|
||||
|
||||
func TestCompactOp(t *testing.T) {
|
||||
req1 := OpCompact(100, WithCompactPhysical()).toRequest()
|
||||
req2 := &etcdserverpb.CompactionRequest{Revision: 100, Physical: true}
|
||||
if !reflect.DeepEqual(req1, req2) {
|
||||
t.Fatalf("expected %+v, got %+v", req2, req1)
|
||||
}
|
||||
}
|
122
vendor/github.com/coreos/etcd/clientv3.old/compare.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
)
|
||||
|
||||
type CompareTarget int
|
||||
type CompareResult int
|
||||
|
||||
const (
|
||||
CompareVersion CompareTarget = iota
|
||||
CompareCreated
|
||||
CompareModified
|
||||
CompareValue
|
||||
)
|
||||
|
||||
type Cmp pb.Compare
|
||||
|
||||
func Compare(cmp Cmp, result string, v interface{}) Cmp {
|
||||
var r pb.Compare_CompareResult
|
||||
|
||||
switch result {
|
||||
case "=":
|
||||
r = pb.Compare_EQUAL
|
||||
case "!=":
|
||||
r = pb.Compare_NOT_EQUAL
|
||||
case ">":
|
||||
r = pb.Compare_GREATER
|
||||
case "<":
|
||||
r = pb.Compare_LESS
|
||||
default:
|
||||
panic("Unknown result op")
|
||||
}
|
||||
|
||||
cmp.Result = r
|
||||
switch cmp.Target {
|
||||
case pb.Compare_VALUE:
|
||||
val, ok := v.(string)
|
||||
if !ok {
|
||||
panic("bad compare value")
|
||||
}
|
||||
cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
|
||||
case pb.Compare_VERSION:
|
||||
cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
|
||||
case pb.Compare_CREATE:
|
||||
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
|
||||
case pb.Compare_MOD:
|
||||
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
|
||||
default:
|
||||
panic("Unknown compare type")
|
||||
}
|
||||
return cmp
|
||||
}
|
||||
|
||||
func Value(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
|
||||
}
|
||||
|
||||
func Version(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
|
||||
}
|
||||
|
||||
func CreateRevision(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
|
||||
}
|
||||
|
||||
func ModRevision(key string) Cmp {
|
||||
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
|
||||
}
|
||||
|
||||
// KeyBytes returns the byte slice holding with the comparison key.
|
||||
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
|
||||
|
||||
// WithKeyBytes sets the byte slice for the comparison key.
|
||||
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
|
||||
|
||||
// ValueBytes returns the byte slice holding the comparison value, if any.
|
||||
func (cmp *Cmp) ValueBytes() []byte {
|
||||
if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
|
||||
return tu.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithValueBytes sets the byte slice for the comparison's value.
|
||||
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
|
||||
|
||||
// WithRange sets the comparison to scan the range [key, end).
|
||||
func (cmp Cmp) WithRange(end string) Cmp {
|
||||
cmp.RangeEnd = []byte(end)
|
||||
return cmp
|
||||
}
|
||||
|
||||
// WithPrefix sets the comparison to scan all keys prefixed by the key.
|
||||
func (cmp Cmp) WithPrefix() Cmp {
|
||||
cmp.RangeEnd = getPrefix(cmp.Key)
|
||||
return cmp
|
||||
}
|
||||
|
||||
func mustInt64(val interface{}) int64 {
|
||||
if v, ok := val.(int64); ok {
|
||||
return v
|
||||
}
|
||||
if v, ok := val.(int); ok {
|
||||
return int64(v)
|
||||
}
|
||||
panic("bad value")
|
||||
}
|
17
vendor/github.com/coreos/etcd/clientv3.old/concurrency/doc.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package concurrency implements concurrency operations on top of
// etcd such as distributed locks, barriers, and elections.
package concurrency
243
vendor/github.com/coreos/etcd/clientv3.old/concurrency/election.go
generated
vendored
Normal file
@ -0,0 +1,243 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrElectionNotLeader = errors.New("election: not leader")
|
||||
ErrElectionNoLeader = errors.New("election: no leader")
|
||||
)
|
||||
|
||||
type Election struct {
|
||||
session *Session
|
||||
|
||||
keyPrefix string
|
||||
|
||||
leaderKey string
|
||||
leaderRev int64
|
||||
leaderSession *Session
|
||||
hdr *pb.ResponseHeader
|
||||
}
|
||||
|
||||
// NewElection returns a new election on a given key prefix.
|
||||
func NewElection(s *Session, pfx string) *Election {
|
||||
return &Election{session: s, keyPrefix: pfx + "/"}
|
||||
}
|
||||
|
||||
// ResumeElection initializes an election with a known leader.
|
||||
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
|
||||
return &Election{
|
||||
session: s,
|
||||
leaderKey: leaderKey,
|
||||
leaderRev: leaderRev,
|
||||
leaderSession: s,
|
||||
}
|
||||
}
|
||||
|
||||
// Campaign puts a value as eligible for the election. It blocks until
|
||||
// it is elected, an error occurs, or the context is cancelled.
|
||||
func (e *Election) Campaign(ctx context.Context, val string) error {
|
||||
s := e.session
|
||||
client := e.session.Client()
|
||||
|
||||
k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
|
||||
txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
|
||||
txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
|
||||
txn = txn.Else(v3.OpGet(k))
|
||||
resp, err := txn.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
|
||||
if !resp.Succeeded {
|
||||
kv := resp.Responses[0].GetResponseRange().Kvs[0]
|
||||
e.leaderRev = kv.CreateRevision
|
||||
if string(kv.Value) != val {
|
||||
if err = e.Proclaim(ctx, val); err != nil {
|
||||
e.Resign(ctx)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
	if err != nil {
		// clean up in case of context cancel
		select {
		case <-ctx.Done():
			e.Resign(client.Ctx())
		default:
			e.leaderSession = nil
		}
		return err
	}
	e.hdr = resp.Header

	return nil
}

// Proclaim lets the leader announce a new value without another election.
func (e *Election) Proclaim(ctx context.Context, val string) error {
	if e.leaderSession == nil {
		return ErrElectionNotLeader
	}
	client := e.session.Client()
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	txn := client.Txn(ctx).If(cmp)
	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
	tresp, terr := txn.Commit()
	if terr != nil {
		return terr
	}
	if !tresp.Succeeded {
		e.leaderKey = ""
		return ErrElectionNotLeader
	}

	e.hdr = tresp.Header
	return nil
}

// Resign lets a leader start a new election.
func (e *Election) Resign(ctx context.Context) (err error) {
	if e.leaderSession == nil {
		return nil
	}
	client := e.session.Client()
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
	if err == nil {
		e.hdr = resp.Header
	}
	e.leaderKey = ""
	e.leaderSession = nil
	return err
}

// Leader returns the leader value for the current election.
func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
	client := e.session.Client()
	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
	if err != nil {
		return nil, err
	} else if len(resp.Kvs) == 0 {
		// no leader currently elected
		return nil, ErrElectionNoLeader
	}
	return resp, nil
}

// Observe returns a channel that reliably observes ordered leader proposals
// as GetResponse values on every current elected leader key. It will not
// necessarily fetch all historical leader updates, but will always post the
// most recent leader value.
//
// The channel closes when the context is canceled or the underlying watcher
// is otherwise disrupted.
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
	retc := make(chan v3.GetResponse)
	go e.observe(ctx, retc)
	return retc
}

func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
	client := e.session.Client()

	defer close(ch)
	for {
		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
		if err != nil {
			return
		}

		var kv *mvccpb.KeyValue
		var hdr *pb.ResponseHeader

		if len(resp.Kvs) == 0 {
			cctx, cancel := context.WithCancel(ctx)
			// wait for first key put on prefix
			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
			wch := client.Watch(cctx, e.keyPrefix, opts...)
			for kv == nil {
				wr, ok := <-wch
				if !ok || wr.Err() != nil {
					cancel()
					return
				}
				// only accept PUTs; a DELETE will make observe() spin
				for _, ev := range wr.Events {
					if ev.Type == mvccpb.PUT {
						hdr, kv = &wr.Header, ev.Kv
						// may have multiple revs; hdr.rev = the last rev
						// set to kv's rev in case batch has multiple PUTs
						hdr.Revision = kv.ModRevision
						break
					}
				}
			}
			cancel()
		} else {
			hdr, kv = resp.Header, resp.Kvs[0]
		}

		select {
		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
		case <-ctx.Done():
			return
		}

		cctx, cancel := context.WithCancel(ctx)
		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
		keyDeleted := false
		for !keyDeleted {
			wr, ok := <-wch
			if !ok {
				return
			}
			for _, ev := range wr.Events {
				if ev.Type == mvccpb.DELETE {
					keyDeleted = true
					break
				}
				resp.Header = &wr.Header
				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
				select {
				case ch <- *resp:
				case <-cctx.Done():
					return
				}
			}
		}
		cancel()
	}
}

// Key returns the leader key if elected, empty string otherwise.
func (e *Election) Key() string { return e.leaderKey }

// Rev returns the leader key's creation revision, if elected.
func (e *Election) Rev() int64 { return e.leaderRev }

// Header is the response header from the last successful election proposal.
func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
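For orientation, a minimal usage sketch of this election API follows; it is illustrative only and not part of the vendored file. The endpoint address and election prefix are made up, the import path assumes the regular clientv3/concurrency package rather than this clientv3.old copy, and the NewElection(session, prefix) constructor and Campaign method are assumed from the package's public API since they do not appear in this hunk.

package main

import (
	"log"

	v3 "github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	// illustrative endpoint; point this at a real etcd cluster
	cli, err := v3.New(v3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// the session's lease backs the candidate's leader key
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// NewElection and Campaign are assumed from the package API (not shown in this hunk)
	e := concurrency.NewElection(s, "/my-election/")
	if err := e.Campaign(context.TODO(), "node-1"); err != nil {
		log.Fatal(err)
	}
	// announce a new value without re-electing, then step down
	if err := e.Proclaim(context.TODO(), "node-1:v2"); err != nil {
		log.Fatal(err)
	}
	if err := e.Resign(context.TODO()); err != nil {
		log.Fatal(err)
	}
}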
65
vendor/github.com/coreos/etcd/clientv3.old/concurrency/key.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"fmt"

	v3 "github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"golang.org/x/net/context"
)

func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wr v3.WatchResponse
	wch := client.Watch(cctx, key, v3.WithRev(rev))
	for wr = range wch {
		for _, ev := range wr.Events {
			if ev.Type == mvccpb.DELETE {
				return nil
			}
		}
	}
	if err := wr.Err(); err != nil {
		return err
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return fmt.Errorf("lost watcher waiting for delete")
}

// waitDeletes efficiently waits until all keys matching the prefix and no greater
// than the create revision are deleted.
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
	for {
		resp, err := client.Get(ctx, pfx, getOpts...)
		if err != nil {
			return nil, err
		}
		if len(resp.Kvs) == 0 {
			return resp.Header, nil
		}
		lastKey := string(resp.Kvs[0].Key)
		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
			return nil, err
		}
	}
}
118
vendor/github.com/coreos/etcd/clientv3.old/concurrency/mutex.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Mutex implements the sync Locker interface with etcd
|
||||
type Mutex struct {
|
||||
s *Session
|
||||
|
||||
pfx string
|
||||
myKey string
|
||||
myRev int64
|
||||
hdr *pb.ResponseHeader
|
||||
}
|
||||
|
||||
func NewMutex(s *Session, pfx string) *Mutex {
|
||||
return &Mutex{s, pfx + "/", "", -1, nil}
|
||||
}
|
||||
|
||||
// Lock locks the mutex with a cancelable context. If the context is canceled
|
||||
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
|
||||
func (m *Mutex) Lock(ctx context.Context) error {
|
||||
s := m.s
|
||||
client := m.s.Client()
|
||||
|
||||
m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
|
||||
cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
|
||||
// put self in lock waiters via myKey; oldest waiter holds lock
|
||||
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
|
||||
// reuse key in case this session already holds the lock
|
||||
get := v3.OpGet(m.myKey)
|
||||
// fetch current holder to complete uncontended path with only one RPC
|
||||
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
|
||||
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.myRev = resp.Header.Revision
|
||||
if !resp.Succeeded {
|
||||
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
|
||||
}
|
||||
// if no key on prefix / the minimum rev is key, already hold the lock
|
||||
ownerKey := resp.Responses[1].GetResponseRange().Kvs
|
||||
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
|
||||
m.hdr = resp.Header
|
||||
return nil
|
||||
}
|
||||
|
||||
// wait for deletion revisions prior to myKey
|
||||
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
|
||||
// release lock key if cancelled
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
m.Unlock(client.Ctx())
|
||||
default:
|
||||
m.hdr = hdr
|
||||
}
|
||||
return werr
|
||||
}
|
||||
|
||||
func (m *Mutex) Unlock(ctx context.Context) error {
|
||||
client := m.s.Client()
|
||||
if _, err := client.Delete(ctx, m.myKey); err != nil {
|
||||
return err
|
||||
}
|
||||
m.myKey = "\x00"
|
||||
m.myRev = -1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Mutex) IsOwner() v3.Cmp {
|
||||
return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
|
||||
}
|
||||
|
||||
func (m *Mutex) Key() string { return m.myKey }
|
||||
|
||||
// Header is the response header received from etcd on acquiring the lock.
|
||||
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
|
||||
|
||||
type lockerMutex struct{ *Mutex }
|
||||
|
||||
func (lm *lockerMutex) Lock() {
|
||||
client := lm.s.Client()
|
||||
if err := lm.Mutex.Lock(client.Ctx()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
func (lm *lockerMutex) Unlock() {
|
||||
client := lm.s.Client()
|
||||
if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// NewLocker creates a sync.Locker backed by an etcd mutex.
|
||||
func NewLocker(s *Session, pfx string) sync.Locker {
|
||||
return &lockerMutex{NewMutex(s, pfx)}
|
||||
}
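As a point of reference, a small sketch of taking and releasing this distributed mutex through a session; the endpoint and lock prefix are illustrative, and the import path assumes the regular clientv3/concurrency package rather than this clientv3.old copy.

package main

import (
	"log"

	v3 "github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := v3.New(v3.Config{Endpoints: []string{"localhost:2379"}}) // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// oldest waiter under the prefix holds the lock; Lock blocks until it is ours
	m := concurrency.NewMutex(s, "/my-lock/")
	if err := m.Lock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	log.Println("holding lock at key", m.Key())
	if err := m.Unlock(context.TODO()); err != nil {
		log.Fatal(err)
	}
}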
140
vendor/github.com/coreos/etcd/clientv3.old/concurrency/session.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const defaultSessionTTL = 60
|
||||
|
||||
// Session represents a lease kept alive for the lifetime of a client.
|
||||
// Fault-tolerant applications may use sessions to reason about liveness.
|
||||
type Session struct {
|
||||
client *v3.Client
|
||||
opts *sessionOptions
|
||||
id v3.LeaseID
|
||||
|
||||
cancel context.CancelFunc
|
||||
donec <-chan struct{}
|
||||
}
|
||||
|
||||
// NewSession gets the leased session for a client.
|
||||
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
|
||||
ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
|
||||
for _, opt := range opts {
|
||||
opt(ops)
|
||||
}
|
||||
|
||||
id := ops.leaseID
|
||||
if id == v3.NoLease {
|
||||
resp, err := client.Grant(ops.ctx, int64(ops.ttl))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id = v3.LeaseID(resp.ID)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ops.ctx)
|
||||
keepAlive, err := client.KeepAlive(ctx, id)
|
||||
if err != nil || keepAlive == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
|
||||
|
||||
// keep the lease alive until client error or cancelled context
|
||||
go func() {
|
||||
defer close(donec)
|
||||
for range keepAlive {
|
||||
// eat messages until keep alive channel closes
|
||||
}
|
||||
}()
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Client is the etcd client that is attached to the session.
|
||||
func (s *Session) Client() *v3.Client {
|
||||
return s.client
|
||||
}
|
||||
|
||||
// Lease is the lease ID for keys bound to the session.
|
||||
func (s *Session) Lease() v3.LeaseID { return s.id }
|
||||
|
||||
// Done returns a channel that closes when the lease is orphaned, expires, or
|
||||
// is otherwise no longer being refreshed.
|
||||
func (s *Session) Done() <-chan struct{} { return s.donec }
|
||||
|
||||
// Orphan ends the refresh for the session lease. This is useful
|
||||
// in case the state of the client connection is indeterminate (revoke
|
||||
// would fail) or when transferring lease ownership.
|
||||
func (s *Session) Orphan() {
|
||||
s.cancel()
|
||||
<-s.donec
|
||||
}
|
||||
|
||||
// Close orphans the session and revokes the session lease.
|
||||
func (s *Session) Close() error {
|
||||
s.Orphan()
|
||||
// if revoke takes longer than the ttl, lease is expired anyway
|
||||
ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
|
||||
_, err := s.client.Revoke(ctx, s.id)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
type sessionOptions struct {
|
||||
ttl int
|
||||
leaseID v3.LeaseID
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// SessionOption configures Session.
|
||||
type SessionOption func(*sessionOptions)
|
||||
|
||||
// WithTTL configures the session's TTL in seconds.
|
||||
// If TTL is <= 0, the default 60 seconds TTL will be used.
|
||||
func WithTTL(ttl int) SessionOption {
|
||||
return func(so *sessionOptions) {
|
||||
if ttl > 0 {
|
||||
so.ttl = ttl
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithLease specifies the existing leaseID to be used for the session.
|
||||
// This is useful in process restart scenario, for example, to reclaim
|
||||
// leadership from an election prior to restart.
|
||||
func WithLease(leaseID v3.LeaseID) SessionOption {
|
||||
return func(so *sessionOptions) {
|
||||
so.leaseID = leaseID
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext assigns a context to the session instead of defaulting to
|
||||
// using the client context. This is useful for canceling NewSession and
|
||||
// Close operations immediately without having to close the client. If the
|
||||
// context is canceled before Close() completes, the session's lease will be
|
||||
// abandoned and left to expire instead of being revoked.
|
||||
func WithContext(ctx context.Context) SessionOption {
|
||||
return func(so *sessionOptions) {
|
||||
so.ctx = ctx
|
||||
}
|
||||
}
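A brief sketch of how a session is typically created with a custom TTL and used to bind a key to its lease; the endpoint and key names are illustrative, and the import path assumes the regular clientv3/concurrency package rather than this clientv3.old copy.

package main

import (
	"log"

	v3 "github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := v3.New(v3.Config{Endpoints: []string{"localhost:2379"}}) // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// 15-second lease TTL; the keepalive goroutine refreshes it until Close or Orphan
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(15))
	if err != nil {
		log.Fatal(err)
	}

	// a key bound to the session's lease disappears once the session ends
	if _, err := cli.Put(context.TODO(), "service/instance-1", "alive", v3.WithLease(s.Lease())); err != nil {
		log.Fatal(err)
	}

	if err := s.Close(); err != nil { // revokes the lease; the key above goes away
		log.Fatal(err)
	}
	<-s.Done() // closed once the lease is no longer being refreshed
}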
386
vendor/github.com/coreos/etcd/clientv3.old/concurrency/stm.go
generated
vendored
Normal file
@ -0,0 +1,386 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package concurrency
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// STM is an interface for software transactional memory.
|
||||
type STM interface {
|
||||
// Get returns the value for a key and inserts the key in the txn's read set.
|
||||
// If Get fails, it aborts the transaction with an error, never returning.
|
||||
Get(key ...string) string
|
||||
// Put adds a value for a key to the write set.
|
||||
Put(key, val string, opts ...v3.OpOption)
|
||||
// Rev returns the revision of a key in the read set.
|
||||
Rev(key string) int64
|
||||
// Del deletes a key.
|
||||
Del(key string)
|
||||
|
||||
// commit attempts to apply the txn's changes to the server.
|
||||
commit() *v3.TxnResponse
|
||||
reset()
|
||||
}
|
||||
|
||||
// Isolation is an enumeration of transactional isolation levels which
|
||||
// describes how transactions should interfere and conflict.
|
||||
type Isolation int
|
||||
|
||||
const (
|
||||
// SerializableSnapshot provides serializable isolation and also checks
|
||||
// for write conflicts.
|
||||
SerializableSnapshot Isolation = iota
|
||||
// Serializable reads within the same transaction attempt return data
|
||||
// from the at the revision of the first read.
|
||||
Serializable
|
||||
// RepeatableReads reads within the same transaction attempt always
|
||||
// return the same data.
|
||||
RepeatableReads
|
||||
// ReadCommitted reads keys from any committed revision.
|
||||
ReadCommitted
|
||||
)
|
||||
|
||||
// stmError safely passes STM errors through panic to the STM error channel.
|
||||
type stmError struct{ err error }
|
||||
|
||||
type stmOptions struct {
|
||||
iso Isolation
|
||||
ctx context.Context
|
||||
prefetch []string
|
||||
}
|
||||
|
||||
type stmOption func(*stmOptions)
|
||||
|
||||
// WithIsolation specifies the transaction isolation level.
|
||||
func WithIsolation(lvl Isolation) stmOption {
|
||||
return func(so *stmOptions) { so.iso = lvl }
|
||||
}
|
||||
|
||||
// WithAbortContext specifies the context for permanently aborting the transaction.
|
||||
func WithAbortContext(ctx context.Context) stmOption {
|
||||
return func(so *stmOptions) { so.ctx = ctx }
|
||||
}
|
||||
|
||||
// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
|
||||
// If an STM transaction will unconditionally fetch a set of keys, prefetching
|
||||
// those keys will save the round-trip cost from requesting each key one by one
|
||||
// with Get().
|
||||
func WithPrefetch(keys ...string) stmOption {
|
||||
return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
|
||||
}
|
||||
|
||||
// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
|
||||
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
|
||||
opts := &stmOptions{ctx: c.Ctx()}
|
||||
for _, f := range so {
|
||||
f(opts)
|
||||
}
|
||||
if len(opts.prefetch) != 0 {
|
||||
f := apply
|
||||
apply = func(s STM) error {
|
||||
s.Get(opts.prefetch...)
|
||||
return f(s)
|
||||
}
|
||||
}
|
||||
return runSTM(mkSTM(c, opts), apply)
|
||||
}
|
||||
|
||||
func mkSTM(c *v3.Client, opts *stmOptions) STM {
|
||||
switch opts.iso {
|
||||
case SerializableSnapshot:
|
||||
s := &stmSerializable{
|
||||
stm: stm{client: c, ctx: opts.ctx},
|
||||
prefetch: make(map[string]*v3.GetResponse),
|
||||
}
|
||||
s.conflicts = func() []v3.Cmp {
|
||||
return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
|
||||
}
|
||||
return s
|
||||
case Serializable:
|
||||
s := &stmSerializable{
|
||||
stm: stm{client: c, ctx: opts.ctx},
|
||||
prefetch: make(map[string]*v3.GetResponse),
|
||||
}
|
||||
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
|
||||
return s
|
||||
case RepeatableReads:
|
||||
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
|
||||
s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
|
||||
return s
|
||||
case ReadCommitted:
|
||||
s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
|
||||
s.conflicts = func() []v3.Cmp { return nil }
|
||||
return s
|
||||
default:
|
||||
panic("unsupported stm")
|
||||
}
|
||||
}
|
||||
|
||||
type stmResponse struct {
|
||||
resp *v3.TxnResponse
|
||||
err error
|
||||
}
|
||||
|
||||
func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
outc := make(chan stmResponse, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
e, ok := r.(stmError)
|
||||
if !ok {
|
||||
// client apply panicked
|
||||
panic(r)
|
||||
}
|
||||
outc <- stmResponse{nil, e.err}
|
||||
}
|
||||
}()
|
||||
var out stmResponse
|
||||
for {
|
||||
s.reset()
|
||||
if out.err = apply(s); out.err != nil {
|
||||
break
|
||||
}
|
||||
if out.resp = s.commit(); out.resp != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
outc <- out
|
||||
}()
|
||||
r := <-outc
|
||||
return r.resp, r.err
|
||||
}
|
||||
|
||||
// stm implements repeatable-read software transactional memory over etcd
|
||||
type stm struct {
|
||||
client *v3.Client
|
||||
ctx context.Context
|
||||
// rset holds read key values and revisions
|
||||
rset readSet
|
||||
// wset holds overwritten keys and their values
|
||||
wset writeSet
|
||||
// getOpts are the opts used for gets
|
||||
getOpts []v3.OpOption
|
||||
// conflicts computes the current conflicts on the txn
|
||||
conflicts func() []v3.Cmp
|
||||
}
|
||||
|
||||
type stmPut struct {
|
||||
val string
|
||||
op v3.Op
|
||||
}
|
||||
|
||||
type readSet map[string]*v3.GetResponse
|
||||
|
||||
func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
|
||||
for i, resp := range txnresp.Responses {
|
||||
rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
|
||||
}
|
||||
}
|
||||
|
||||
func (rs readSet) first() int64 {
|
||||
ret := int64(math.MaxInt64 - 1)
|
||||
for _, resp := range rs {
|
||||
if len(resp.Kvs) > 0 && resp.Kvs[0].ModRevision < ret {
|
||||
ret = resp.Kvs[0].ModRevision
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// cmps guards the txn from updates to read set
|
||||
func (rs readSet) cmps() []v3.Cmp {
|
||||
cmps := make([]v3.Cmp, 0, len(rs))
|
||||
for k, rk := range rs {
|
||||
cmps = append(cmps, isKeyCurrent(k, rk))
|
||||
}
|
||||
return cmps
|
||||
}
|
||||
|
||||
type writeSet map[string]stmPut
|
||||
|
||||
func (ws writeSet) get(keys ...string) *stmPut {
|
||||
for _, key := range keys {
|
||||
if wv, ok := ws[key]; ok {
|
||||
return &wv
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// cmps returns a cmp list testing no writes have happened past rev
|
||||
func (ws writeSet) cmps(rev int64) []v3.Cmp {
|
||||
cmps := make([]v3.Cmp, 0, len(ws))
|
||||
for key := range ws {
|
||||
cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
|
||||
}
|
||||
return cmps
|
||||
}
|
||||
|
||||
// puts is the list of ops for all pending writes
|
||||
func (ws writeSet) puts() []v3.Op {
|
||||
puts := make([]v3.Op, 0, len(ws))
|
||||
for _, v := range ws {
|
||||
puts = append(puts, v.op)
|
||||
}
|
||||
return puts
|
||||
}
|
||||
|
||||
func (s *stm) Get(keys ...string) string {
|
||||
if wv := s.wset.get(keys...); wv != nil {
|
||||
return wv.val
|
||||
}
|
||||
return respToValue(s.fetch(keys...))
|
||||
}
|
||||
|
||||
func (s *stm) Put(key, val string, opts ...v3.OpOption) {
|
||||
s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
|
||||
}
|
||||
|
||||
func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
|
||||
|
||||
func (s *stm) Rev(key string) int64 {
|
||||
if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
|
||||
return resp.Kvs[0].ModRevision
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *stm) commit() *v3.TxnResponse {
|
||||
txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
|
||||
if err != nil {
|
||||
panic(stmError{err})
|
||||
}
|
||||
if txnresp.Succeeded {
|
||||
return txnresp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stm) fetch(keys ...string) *v3.GetResponse {
|
||||
if len(keys) == 0 {
|
||||
return nil
|
||||
}
|
||||
ops := make([]v3.Op, len(keys))
|
||||
for i, key := range keys {
|
||||
if resp, ok := s.rset[key]; ok {
|
||||
return resp
|
||||
}
|
||||
ops[i] = v3.OpGet(key, s.getOpts...)
|
||||
}
|
||||
txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
|
||||
if err != nil {
|
||||
panic(stmError{err})
|
||||
}
|
||||
s.rset.add(keys, txnresp)
|
||||
return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
|
||||
}
|
||||
|
||||
func (s *stm) reset() {
|
||||
s.rset = make(map[string]*v3.GetResponse)
|
||||
s.wset = make(map[string]stmPut)
|
||||
}
|
||||
|
||||
type stmSerializable struct {
|
||||
stm
|
||||
prefetch map[string]*v3.GetResponse
|
||||
}
|
||||
|
||||
func (s *stmSerializable) Get(keys ...string) string {
|
||||
if wv := s.wset.get(keys...); wv != nil {
|
||||
return wv.val
|
||||
}
|
||||
firstRead := len(s.rset) == 0
|
||||
for _, key := range keys {
|
||||
if resp, ok := s.prefetch[key]; ok {
|
||||
delete(s.prefetch, key)
|
||||
s.rset[key] = resp
|
||||
}
|
||||
}
|
||||
resp := s.stm.fetch(keys...)
|
||||
if firstRead {
|
||||
// txn's base revision is defined by the first read
|
||||
s.getOpts = []v3.OpOption{
|
||||
v3.WithRev(resp.Header.Revision),
|
||||
v3.WithSerializable(),
|
||||
}
|
||||
}
|
||||
return respToValue(resp)
|
||||
}
|
||||
|
||||
func (s *stmSerializable) Rev(key string) int64 {
|
||||
s.Get(key)
|
||||
return s.stm.Rev(key)
|
||||
}
|
||||
|
||||
func (s *stmSerializable) gets() ([]string, []v3.Op) {
|
||||
keys := make([]string, 0, len(s.rset))
|
||||
ops := make([]v3.Op, 0, len(s.rset))
|
||||
for k := range s.rset {
|
||||
keys = append(keys, k)
|
||||
ops = append(ops, v3.OpGet(k))
|
||||
}
|
||||
return keys, ops
|
||||
}
|
||||
|
||||
func (s *stmSerializable) commit() *v3.TxnResponse {
|
||||
keys, getops := s.gets()
|
||||
txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
|
||||
// use Else to prefetch keys in case of conflict to save a round trip
|
||||
txnresp, err := txn.Else(getops...).Commit()
|
||||
if err != nil {
|
||||
panic(stmError{err})
|
||||
}
|
||||
if txnresp.Succeeded {
|
||||
return txnresp
|
||||
}
|
||||
// load prefetch with Else data
|
||||
s.rset.add(keys, txnresp)
|
||||
s.prefetch = s.rset
|
||||
s.getOpts = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
|
||||
if len(r.Kvs) != 0 {
|
||||
return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
|
||||
}
|
||||
return v3.Compare(v3.ModRevision(k), "=", 0)
|
||||
}
|
||||
|
||||
func respToValue(resp *v3.GetResponse) string {
|
||||
if resp == nil || len(resp.Kvs) == 0 {
|
||||
return ""
|
||||
}
|
||||
return string(resp.Kvs[0].Value)
|
||||
}
|
||||
|
||||
// NewSTMRepeatable is deprecated.
|
||||
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
|
||||
}
|
||||
|
||||
// NewSTMSerializable is deprecated.
|
||||
func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
|
||||
}
|
||||
|
||||
// NewSTMReadCommitted is deprecated.
|
||||
func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
|
||||
return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
|
||||
}
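To illustrate the API above, a minimal sketch of an STM transaction that moves a value between two keys; the key names and endpoint are made up, and the import path assumes the regular clientv3/concurrency package rather than this clientv3.old copy.

package main

import (
	"log"

	v3 "github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := v3.New(v3.Config{Endpoints: []string{"localhost:2379"}}) // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// apply is retried until it commits without conflicting reads or writes
	_, err = concurrency.NewSTM(cli, func(st concurrency.STM) error {
		val := st.Get("acct/a") // joins the transaction's read set
		st.Put("acct/b", val)   // buffered in the write set until commit
		st.Del("acct/a")
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}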
62
vendor/github.com/coreos/etcd/clientv3.old/config.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
// Endpoints is a list of URLs.
|
||||
Endpoints []string `json:"endpoints"`
|
||||
|
||||
// AutoSyncInterval is the interval to update endpoints with its latest members.
|
||||
// 0 disables auto-sync. By default auto-sync is disabled.
|
||||
AutoSyncInterval time.Duration `json:"auto-sync-interval"`
|
||||
|
||||
// DialTimeout is the timeout for failing to establish a connection.
|
||||
DialTimeout time.Duration `json:"dial-timeout"`
|
||||
|
||||
// DialKeepAliveTime is the time in seconds after which client pings the server to see if
|
||||
// transport is alive.
|
||||
DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
|
||||
|
||||
// DialKeepAliveTimeout is the time in seconds that the client waits for a response for the
|
||||
// keep-alive probe. If the response is not received in this time, the connection is closed.
|
||||
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
|
||||
|
||||
// TLS holds the client secure credentials, if any.
|
||||
TLS *tls.Config
|
||||
|
||||
// Username is a username for authentication.
|
||||
Username string `json:"username"`
|
||||
|
||||
// Password is a password for authentication.
|
||||
Password string `json:"password"`
|
||||
|
||||
// RejectOldCluster when set will refuse to create a client against an outdated cluster.
|
||||
RejectOldCluster bool `json:"reject-old-cluster"`
|
||||
|
||||
// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
|
||||
DialOptions []grpc.DialOption
|
||||
|
||||
// Context is the default client context; it can be used to cancel grpc dial out and
|
||||
// other operations that do not have an explicit context.
|
||||
Context context.Context
|
||||
}
64
vendor/github.com/coreos/etcd/clientv3.old/doc.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package clientv3 implements the official Go etcd client for v3.
|
||||
//
|
||||
// Create client using `clientv3.New`:
|
||||
//
|
||||
// cli, err := clientv3.New(clientv3.Config{
|
||||
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
|
||||
// DialTimeout: 5 * time.Second,
|
||||
// })
|
||||
// if err != nil {
|
||||
// // handle error!
|
||||
// }
|
||||
// defer cli.Close()
|
||||
//
|
||||
// Make sure to close the client after using it. If the client is not closed, the
|
||||
// connection will have leaky goroutines.
|
||||
//
|
||||
// To specify client request timeout, pass context.WithTimeout to APIs:
|
||||
//
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
|
||||
// cancel()
|
||||
// if err != nil {
|
||||
// // handle error!
|
||||
// }
|
||||
// // use the response
|
||||
//
|
||||
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
|
||||
// Clients are safe for concurrent use by multiple goroutines.
|
||||
//
|
||||
// etcd client returns 2 types of errors:
|
||||
//
|
||||
// 1. context error: canceled or deadline exceeded.
|
||||
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
|
||||
//
|
||||
// Here is the example code to handle client errors:
|
||||
//
|
||||
// resp, err := kvc.Put(ctx, "", "")
|
||||
// if err != nil {
|
||||
// if err == context.Canceled {
|
||||
// // ctx is canceled by another routine
|
||||
// } else if err == context.DeadlineExceeded {
|
||||
// // ctx is attached with a deadline and it exceeded
|
||||
// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
|
||||
// // process (verr.Errors)
|
||||
// } else {
|
||||
// // bad cluster endpoints, which are not etcd servers
|
||||
// }
|
||||
// }
|
||||
//
|
||||
package clientv3
103
vendor/github.com/coreos/etcd/clientv3.old/example_cluster_test.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func ExampleCluster_memberList() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
resp, err := cli.MemberList(context.Background())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Println("members:", len(resp.Members))
|
||||
// Output: members: 3
|
||||
}
|
||||
|
||||
func ExampleCluster_memberAdd() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints[:2],
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
peerURLs := endpoints[2:]
|
||||
mresp, err := cli.MemberAdd(context.Background(), peerURLs)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Println("added member.PeerURLs:", mresp.Member.PeerURLs)
|
||||
// added member.PeerURLs: [http://localhost:32380]
|
||||
}
|
||||
|
||||
func ExampleCluster_memberRemove() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints[1:],
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
resp, err := cli.MemberList(context.Background())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = cli.MemberRemove(context.Background(), resp.Members[0].ID)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleCluster_memberUpdate() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
resp, err := cli.MemberList(context.Background())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
peerURLs := []string{"http://localhost:12380"}
|
||||
_, err = cli.MemberUpdate(context.Background(), resp.Members[0].ID, peerURLs)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
279
vendor/github.com/coreos/etcd/clientv3.old/example_kv_test.go
generated
vendored
Normal file
@ -0,0 +1,279 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func ExampleKV_put() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
_, err = cli.Put(ctx, "sample_key", "sample_value")
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleKV_putErrorHandling() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
_, err = cli.Put(ctx, "", "sample_value")
|
||||
cancel()
|
||||
if err != nil {
|
||||
switch err {
|
||||
case context.Canceled:
|
||||
fmt.Printf("ctx is canceled by another routine: %v\n", err)
|
||||
case context.DeadlineExceeded:
|
||||
fmt.Printf("ctx is attached with a deadline is exceeded: %v\n", err)
|
||||
case rpctypes.ErrEmptyKey:
|
||||
fmt.Printf("client-side error: %v\n", err)
|
||||
default:
|
||||
fmt.Printf("bad cluster endpoints, which are not etcd servers: %v\n", err)
|
||||
}
|
||||
}
|
||||
// Output: client-side error: etcdserver: key is not provided
|
||||
}
|
||||
|
||||
func ExampleKV_get() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
resp, err := cli.Get(ctx, "foo")
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, ev := range resp.Kvs {
|
||||
fmt.Printf("%s : %s\n", ev.Key, ev.Value)
|
||||
}
|
||||
// Output: foo : bar
|
||||
}
|
||||
|
||||
func ExampleKV_getWithRev() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
presp, err := cli.Put(context.TODO(), "foo", "bar1")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar2")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
resp, err := cli.Get(ctx, "foo", clientv3.WithRev(presp.Header.Revision))
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, ev := range resp.Kvs {
|
||||
fmt.Printf("%s : %s\n", ev.Key, ev.Value)
|
||||
}
|
||||
// Output: foo : bar1
|
||||
}
|
||||
|
||||
func ExampleKV_getSortedPrefix() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
for i := range make([]int, 3) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
_, err = cli.Put(ctx, fmt.Sprintf("key_%d", i), "value")
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
resp, err := cli.Get(ctx, "key", clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, ev := range resp.Kvs {
|
||||
fmt.Printf("%s : %s\n", ev.Key, ev.Value)
|
||||
}
|
||||
// Output:
|
||||
// key_2 : value
|
||||
// key_1 : value
|
||||
// key_0 : value
|
||||
}
|
||||
|
||||
func ExampleKV_delete() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
// count keys about to be deleted
|
||||
gresp, err := cli.Get(ctx, "key", clientv3.WithPrefix())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// delete the keys
|
||||
dresp, err := cli.Delete(ctx, "key", clientv3.WithPrefix())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Deleted all keys:", int64(len(gresp.Kvs)) == dresp.Deleted)
|
||||
// Output:
|
||||
// Deleted all keys: true
|
||||
}
|
||||
|
||||
func ExampleKV_compact() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
resp, err := cli.Get(ctx, "foo")
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
compRev := resp.Header.Revision // specify compact revision of your choice
|
||||
|
||||
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
|
||||
_, err = cli.Compact(ctx, compRev)
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleKV_txn() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
kvc := clientv3.NewKV(cli)
|
||||
|
||||
_, err = kvc.Put(context.TODO(), "key", "xyz")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
_, err = kvc.Txn(ctx).
|
||||
If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). // txn value comparisons are lexical
|
||||
Then(clientv3.OpPut("key", "XYZ")). // this runs, since 'xyz' > 'abc'
|
||||
Else(clientv3.OpPut("key", "ABC")).
|
||||
Commit()
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
gresp, err := kvc.Get(context.TODO(), "key")
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for _, ev := range gresp.Kvs {
|
||||
fmt.Printf("%s : %s\n", ev.Key, ev.Value)
|
||||
}
|
||||
// Output: key : XYZ
|
||||
}
|
||||
|
||||
func ExampleKV_do() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
ops := []clientv3.Op{
|
||||
clientv3.OpPut("put-key", "123"),
|
||||
clientv3.OpGet("put-key"),
|
||||
clientv3.OpPut("put-key", "456")}
|
||||
|
||||
for _, op := range ops {
|
||||
if _, err := cli.Do(context.TODO(), op); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
141
vendor/github.com/coreos/etcd/clientv3.old/example_lease_test.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func ExampleLease_grant() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// minimum lease TTL is 5-second
|
||||
resp, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// after 5 seconds, the key 'foo' will be removed
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleLease_revoke() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
resp, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// revoking lease expires the key attached to its lease ID
|
||||
_, err = cli.Revoke(context.TODO(), resp.ID)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
gresp, err := cli.Get(context.TODO(), "foo")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Println("number of keys:", len(gresp.Kvs))
|
||||
// Output: number of keys: 0
|
||||
}
|
||||
|
||||
func ExampleLease_keepAlive() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
resp, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// the key 'foo' will be kept forever
|
||||
ch, kaerr := cli.KeepAlive(context.TODO(), resp.ID)
|
||||
if kaerr != nil {
|
||||
log.Fatal(kaerr)
|
||||
}
|
||||
|
||||
ka := <-ch
|
||||
fmt.Println("ttl:", ka.TTL)
|
||||
// Output: ttl: 5
|
||||
}
|
||||
|
||||
func ExampleLease_keepAliveOnce() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
resp, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// to renew the lease only once
|
||||
ka, kaerr := cli.KeepAliveOnce(context.TODO(), resp.ID)
|
||||
if kaerr != nil {
|
||||
log.Fatal(kaerr)
|
||||
}
|
||||
|
||||
fmt.Println("ttl:", ka.TTL)
|
||||
// Output: ttl: 5
|
||||
}
68
vendor/github.com/coreos/etcd/clientv3.old/example_maintenence_test.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
)
|
||||
|
||||
func ExampleMaintenance_status() {
|
||||
for _, ep := range endpoints {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{ep},
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// resp, err := cli.Status(context.Background(), ep)
|
||||
//
|
||||
// or
|
||||
//
|
||||
mapi := clientv3.NewMaintenance(cli)
|
||||
resp, err := mapi.Status(context.Background(), ep)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("endpoint: %s / IsLeader: %v\n", ep, resp.Header.MemberId == resp.Leader)
|
||||
}
|
||||
// endpoint: localhost:2379 / IsLeader: false
|
||||
// endpoint: localhost:22379 / IsLeader: false
|
||||
// endpoint: localhost:32379 / IsLeader: true
|
||||
}
|
||||
|
||||
func ExampleMaintenance_defragment() {
|
||||
for _, ep := range endpoints {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{ep},
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
if _, err = cli.Defragment(context.TODO(), ep); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
84
vendor/github.com/coreos/etcd/clientv3.old/example_metrics_test.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
|
||||
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func ExampleClient_metrics() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialOptions: []grpc.DialOption{
|
||||
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
|
||||
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// get a key so it shows up in the metrics as a range rpc
|
||||
cli.Get(context.TODO(), "test_key")
|
||||
|
||||
// listen for all prometheus metrics
|
||||
ln, err := net.Listen("tcp", ":0")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
http.Serve(ln, prometheus.Handler())
|
||||
}()
|
||||
defer func() {
|
||||
ln.Close()
|
||||
<-donec
|
||||
}()
|
||||
|
||||
// make an http request to fetch all prometheus metrics
|
||||
url := "http://" + ln.Addr().String() + "/metrics"
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
log.Fatalf("fetch error: %v", err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("fetch error: reading %s: %v", url, err)
|
||||
}
|
||||
|
||||
// confirm range request in metrics
|
||||
for _, l := range strings.Split(string(b), "\n") {
|
||||
if strings.Contains(l, `grpc_client_started_total{grpc_method="Range"`) {
|
||||
fmt.Println(l)
|
||||
break
|
||||
}
|
||||
}
|
||||
// Output: grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
|
||||
}
76
vendor/github.com/coreos/etcd/clientv3.old/example_test.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
dialTimeout = 5 * time.Second
|
||||
requestTimeout = 10 * time.Second
|
||||
endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"}
|
||||
)
|
||||
|
||||
func Example() {
|
||||
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
|
||||
clientv3.SetLogger(plog)
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close() // make sure to close the client
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleConfig_withTLS() {
|
||||
tlsInfo := transport.TLSInfo{
|
||||
CertFile: "/tmp/test-certs/test-name-1.pem",
|
||||
KeyFile: "/tmp/test-certs/test-name-1-key.pem",
|
||||
TrustedCAFile: "/tmp/test-certs/trusted-ca.pem",
|
||||
}
|
||||
tlsConfig, err := tlsInfo.ClientConfig()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
TLS: tlsConfig,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close() // make sure to close the client
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
100
vendor/github.com/coreos/etcd/clientv3.old/example_watch_test.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func ExampleWatcher_watch() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
rch := cli.Watch(context.Background(), "foo")
|
||||
for wresp := range rch {
|
||||
for _, ev := range wresp.Events {
|
||||
fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
|
||||
}
|
||||
}
|
||||
// PUT "foo" : "bar"
|
||||
}
|
||||
|
||||
func ExampleWatcher_watchWithPrefix() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
rch := cli.Watch(context.Background(), "foo", clientv3.WithPrefix())
|
||||
for wresp := range rch {
|
||||
for _, ev := range wresp.Events {
|
||||
fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
|
||||
}
|
||||
}
|
||||
// PUT "foo1" : "bar"
|
||||
}
|
||||
|
||||
func ExampleWatcher_watchWithRange() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// watches within ['foo1', 'foo4'), in lexicographical order
|
||||
rch := cli.Watch(context.Background(), "foo1", clientv3.WithRange("foo4"))
|
||||
for wresp := range rch {
|
||||
for _, ev := range wresp.Events {
|
||||
fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
|
||||
}
|
||||
}
|
||||
// PUT "foo1" : "bar"
|
||||
// PUT "foo2" : "bar"
|
||||
// PUT "foo3" : "bar"
|
||||
}
|
||||
|
||||
func ExampleWatcher_watchWithProgressNotify() {
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
rch := cli.Watch(context.Background(), "foo", clientv3.WithProgressNotify())
|
||||
wresp := <-rch
|
||||
fmt.Printf("wresp.Header.Revision: %d\n", wresp.Header.Revision)
|
||||
fmt.Println("wresp.IsProgressNotify:", wresp.IsProgressNotify())
|
||||
// wresp.Header.Revision: 0
|
||||
// wresp.IsProgressNotify: true
|
||||
}
|
128 vendor/github.com/coreos/etcd/clientv3.old/integration/cluster_test.go generated vendored Normal file
@ -0,0 +1,128 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/etcd/integration"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestMemberList(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
capi := clus.RandClient()
|
||||
|
||||
resp, err := capi.MemberList(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to list member %v", err)
|
||||
}
|
||||
|
||||
if len(resp.Members) != 3 {
|
||||
t.Errorf("number of members = %d, want %d", len(resp.Members), 3)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemberAdd(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
capi := clus.RandClient()
|
||||
|
||||
urls := []string{"http://127.0.0.1:1234"}
|
||||
resp, err := capi.MemberAdd(context.Background(), urls)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to add member %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(resp.Member.PeerURLs, urls) {
|
||||
t.Errorf("urls = %v, want %v", urls, resp.Member.PeerURLs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemberRemove(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
capi := clus.Client(1)
|
||||
resp, err := capi.MemberList(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to list member %v", err)
|
||||
}
|
||||
|
||||
rmvID := resp.Members[0].ID
|
||||
// indexes in capi member list don't necessarily match cluster member list;
|
||||
// find member that is not the client to remove
|
||||
for _, m := range resp.Members {
|
||||
mURLs, _ := types.NewURLs(m.PeerURLs)
|
||||
if !reflect.DeepEqual(mURLs, clus.Members[1].ServerConfig.PeerURLs) {
|
||||
rmvID = m.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
_, err = capi.MemberRemove(context.Background(), rmvID)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to remove member %v", err)
|
||||
}
|
||||
|
||||
resp, err = capi.MemberList(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to list member %v", err)
|
||||
}
|
||||
|
||||
if len(resp.Members) != 2 {
|
||||
t.Errorf("number of members = %d, want %d", len(resp.Members), 2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMemberUpdate(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
capi := clus.RandClient()
|
||||
resp, err := capi.MemberList(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to list member %v", err)
|
||||
}
|
||||
|
||||
urls := []string{"http://127.0.0.1:1234"}
|
||||
_, err = capi.MemberUpdate(context.Background(), resp.Members[0].ID, urls)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to update member %v", err)
|
||||
}
|
||||
|
||||
resp, err = capi.MemberList(context.Background())
|
||||
if err != nil {
|
||||
t.Fatalf("failed to list member %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(resp.Members[0].PeerURLs, urls) {
|
||||
t.Errorf("urls = %v, want %v", urls, resp.Members[0].PeerURLs)
|
||||
}
|
||||
}
|
191 vendor/github.com/coreos/etcd/clientv3.old/integration/dial_test.go generated vendored Normal file
@ -0,0 +1,191 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/integration"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
testTLSInfo = transport.TLSInfo{
|
||||
KeyFile: "../../integration/fixtures/server.key.insecure",
|
||||
CertFile: "../../integration/fixtures/server.crt",
|
||||
TrustedCAFile: "../../integration/fixtures/ca.crt",
|
||||
ClientCertAuth: true,
|
||||
}
|
||||
|
||||
testTLSInfoExpired = transport.TLSInfo{
|
||||
KeyFile: "../../integration/fixtures-expired/server-key.pem",
|
||||
CertFile: "../../integration/fixtures-expired/server.pem",
|
||||
TrustedCAFile: "../../integration/fixtures-expired/etcd-root-ca.pem",
|
||||
ClientCertAuth: true,
|
||||
}
|
||||
)
|
||||
|
||||
// TestDialTLSExpired tests client with expired certs fails to dial.
|
||||
func TestDialTLSExpired(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
tls, err := testTLSInfoExpired.ClientConfig()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// expect remote errors 'tls: bad certificate'
|
||||
_, err = clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{clus.Members[0].GRPCAddr()},
|
||||
DialTimeout: 3 * time.Second,
|
||||
TLS: tls,
|
||||
})
|
||||
if err != grpc.ErrClientConnTimeout {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDialTLSNoConfig ensures the client fails to dial / times out
|
||||
// when TLS endpoints (https, unixs) are given but no tls config.
|
||||
func TestDialTLSNoConfig(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo})
|
||||
defer clus.Terminate(t)
|
||||
// expect 'signed by unknown authority'
|
||||
_, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{clus.Members[0].GRPCAddr()},
|
||||
DialTimeout: time.Second,
|
||||
})
|
||||
if err != grpc.ErrClientConnTimeout {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
|
||||
func TestDialSetEndpointsBeforeFail(t *testing.T) {
|
||||
testDialSetEndpoints(t, true)
|
||||
}
|
||||
|
||||
func TestDialSetEndpointsAfterFail(t *testing.T) {
|
||||
testDialSetEndpoints(t, false)
|
||||
}
|
||||
|
||||
// testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
|
||||
func testDialSetEndpoints(t *testing.T, setBefore bool) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// get endpoint list
|
||||
eps := make([]string, 3)
|
||||
for i := range eps {
|
||||
eps[i] = clus.Members[i].GRPCAddr()
|
||||
}
|
||||
toKill := rand.Intn(len(eps))
|
||||
|
||||
cfg := clientv3.Config{Endpoints: []string{eps[toKill]}, DialTimeout: 1 * time.Second}
|
||||
cli, err := clientv3.New(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
if setBefore {
|
||||
cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
|
||||
}
|
||||
// make a dead node
|
||||
clus.Members[toKill].Stop(t)
|
||||
clus.WaitLeader(t)
|
||||
|
||||
if !setBefore {
|
||||
cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
|
||||
// TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint
|
||||
// with a new one that doesn't include original endpoint.
|
||||
func TestSwitchSetEndpoints(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// get non partitioned members endpoints
|
||||
eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.Members[0].InjectPartition(t, clus.Members[1:])
|
||||
|
||||
cli.SetEndpoints(eps...)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
if _, err := cli.Get(ctx, "foo"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRejectOldCluster(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
// 2 endpoints to test multi-endpoint Status
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cfg := clientv3.Config{
|
||||
Endpoints: []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()},
|
||||
DialTimeout: 5 * time.Second,
|
||||
RejectOldCluster: true,
|
||||
}
|
||||
cli, err := clientv3.New(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cli.Close()
|
||||
}
|
||||
|
||||
// TestDialForeignEndpoint checks an endpoint that is not registered
|
||||
// with the balancer can be dialed.
|
||||
func TestDialForeignEndpoint(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// grpc can return a lazy connection that's not connected yet; confirm
|
||||
// that it can communicate with the cluster.
|
||||
kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn))
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
|
||||
defer cancel()
|
||||
if _, gerr := kvc.Get(ctx, "abc"); gerr != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
17 vendor/github.com/coreos/etcd/clientv3.old/integration/doc.go generated vendored Normal file
@ -0,0 +1,17 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package integration implements tests built upon embedded etcd, and focuses on
// correctness of etcd client.
package integration
897 vendor/github.com/coreos/etcd/clientv3.old/integration/kv_test.go generated vendored Normal file
@ -0,0 +1,897 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"github.com/coreos/etcd/integration"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestKVPutError(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
var (
|
||||
maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
|
||||
quota = int64(int(maxReqBytes) + 8*os.Getpagesize())
|
||||
)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
ctx := context.TODO()
|
||||
|
||||
_, err := kv.Put(ctx, "", "bar")
|
||||
if err != rpctypes.ErrEmptyKey {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
|
||||
}
|
||||
|
||||
_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100)))
|
||||
if err != rpctypes.ErrRequestTooLarge {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
|
||||
}
|
||||
|
||||
_, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
|
||||
if err != nil { // below quota
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second) // give enough time for commit
|
||||
|
||||
_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
|
||||
if err != rpctypes.ErrNoSpace { // over quota
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVPut(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
lapi := clus.RandClient()
|
||||
|
||||
kv := clus.RandClient()
|
||||
ctx := context.TODO()
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create lease %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
key, val string
|
||||
leaseID clientv3.LeaseID
|
||||
}{
|
||||
{"foo", "bar", clientv3.NoLease},
|
||||
{"hello", "world", resp.ID},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
if _, err := kv.Put(ctx, tt.key, tt.val, clientv3.WithLease(tt.leaseID)); err != nil {
|
||||
t.Fatalf("#%d: couldn't put %q (%v)", i, tt.key, err)
|
||||
}
|
||||
resp, err := kv.Get(ctx, tt.key)
|
||||
if err != nil {
|
||||
t.Fatalf("#%d: couldn't get key (%v)", i, err)
|
||||
}
|
||||
if len(resp.Kvs) != 1 {
|
||||
t.Fatalf("#%d: expected 1 key, got %d", i, len(resp.Kvs))
|
||||
}
|
||||
if !bytes.Equal([]byte(tt.val), resp.Kvs[0].Value) {
|
||||
t.Errorf("#%d: val = %s, want %s", i, tt.val, resp.Kvs[0].Value)
|
||||
}
|
||||
if tt.leaseID != clientv3.LeaseID(resp.Kvs[0].Lease) {
|
||||
t.Errorf("#%d: val = %d, want %d", i, tt.leaseID, resp.Kvs[0].Lease)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value.
|
||||
func TestKVPutWithIgnoreValue(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
|
||||
_, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue())
|
||||
if err != rpctypes.ErrKeyNotFound {
|
||||
t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
|
||||
}
|
||||
|
||||
if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rr, rerr := kv.Get(context.TODO(), "foo")
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
}
|
||||
if len(rr.Kvs) != 1 {
|
||||
t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs))
|
||||
}
|
||||
if !bytes.Equal(rr.Kvs[0].Value, []byte("bar")) {
|
||||
t.Fatalf("value expected 'bar', got %q", rr.Kvs[0].Value)
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key.
|
||||
func TestKVPutWithIgnoreLease(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
|
||||
lapi := clus.RandClient()
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
|
||||
if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()); err != rpctypes.ErrKeyNotFound {
|
||||
t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
|
||||
}
|
||||
|
||||
if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rr, rerr := kv.Get(context.TODO(), "zoo")
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
}
|
||||
if len(rr.Kvs) != 1 {
|
||||
t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs))
|
||||
}
|
||||
if rr.Kvs[0].Lease != int64(resp.ID) {
|
||||
t.Fatalf("lease expected %v, got %v", resp.ID, rr.Kvs[0].Lease)
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVPutWithRequireLeader(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
clus.Members[1].Stop(t)
|
||||
clus.Members[2].Stop(t)
|
||||
|
||||
// wait for election timeout, then member[0] will not have a leader.
|
||||
var (
|
||||
electionTicks = 10
|
||||
tickDuration = 10 * time.Millisecond
|
||||
)
|
||||
time.Sleep(time.Duration(3*electionTicks) * tickDuration)
|
||||
|
||||
kv := clus.Client(0)
|
||||
_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
|
||||
if err != rpctypes.ErrNoLeader {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// clients may give timeout errors since the members are stopped; take
|
||||
// the clients so that terminating the cluster won't complain
|
||||
clus.Client(1).Close()
|
||||
clus.Client(2).Close()
|
||||
clus.TakeClient(1)
|
||||
clus.TakeClient(2)
|
||||
}
|
||||
|
||||
func TestKVRange(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
ctx := context.TODO()
|
||||
|
||||
keySet := []string{"a", "b", "c", "c", "c", "foo", "foo/abc", "fop"}
|
||||
for i, key := range keySet {
|
||||
if _, err := kv.Put(ctx, key, ""); err != nil {
|
||||
t.Fatalf("#%d: couldn't put %q (%v)", i, key, err)
|
||||
}
|
||||
}
|
||||
resp, err := kv.Get(ctx, keySet[0])
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't get key (%v)", err)
|
||||
}
|
||||
wheader := resp.Header
|
||||
|
||||
tests := []struct {
|
||||
begin, end string
|
||||
rev int64
|
||||
opts []clientv3.OpOption
|
||||
|
||||
wantSet []*mvccpb.KeyValue
|
||||
}{
|
||||
// range first two
|
||||
{
|
||||
"a", "c",
|
||||
0,
|
||||
nil,
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
},
|
||||
},
|
||||
// range first two with serializable
|
||||
{
|
||||
"a", "c",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSerializable()},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with rev
|
||||
{
|
||||
"a", "x",
|
||||
2,
|
||||
nil,
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with countOnly
|
||||
{
|
||||
"a", "x",
|
||||
2,
|
||||
[]clientv3.OpOption{clientv3.WithCountOnly()},
|
||||
|
||||
nil,
|
||||
},
|
||||
// range all with SortByKey, SortAscend
|
||||
{
|
||||
"a", "x",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByKey, missing sorting order (ASCEND by default)
|
||||
{
|
||||
"a", "x",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortNone)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByCreateRevision, SortDescend
|
||||
{
|
||||
"a", "x",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortDescend)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByCreateRevision, missing sorting order (ASCEND by default)
|
||||
{
|
||||
"a", "x",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortNone)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// range all with SortByModRevision, SortDescend
|
||||
{
|
||||
"a", "x",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
},
|
||||
},
|
||||
// WithPrefix
|
||||
{
|
||||
"foo", "",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithPrefix()},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
},
|
||||
},
|
||||
// WithFromKey
|
||||
{
|
||||
"fo", "",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithFromKey()},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// fetch entire keyspace using WithFromKey
|
||||
{
|
||||
"\x00", "",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithFromKey(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
// fetch entire keyspace using WithPrefix
|
||||
{
|
||||
"", "",
|
||||
0,
|
||||
[]clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
|
||||
|
||||
[]*mvccpb.KeyValue{
|
||||
{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
|
||||
{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
|
||||
{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
|
||||
{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
|
||||
{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
|
||||
{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
opts := []clientv3.OpOption{clientv3.WithRange(tt.end), clientv3.WithRev(tt.rev)}
|
||||
opts = append(opts, tt.opts...)
|
||||
resp, err := kv.Get(ctx, tt.begin, opts...)
|
||||
if err != nil {
|
||||
t.Fatalf("#%d: couldn't range (%v)", i, err)
|
||||
}
|
||||
if !reflect.DeepEqual(wheader, resp.Header) {
|
||||
t.Fatalf("#%d: wheader expected %+v, got %+v", i, wheader, resp.Header)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.wantSet, resp.Kvs) {
|
||||
t.Fatalf("#%d: resp.Kvs expected %+v, got %+v", i, tt.wantSet, resp.Kvs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVGetErrConnClosed(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
_, err := cli.Get(context.TODO(), "foo")
|
||||
if err != nil && err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clus.TakeClient(0)
|
||||
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("kv.Get took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVNewAfterClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.TakeClient(0)
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
if _, err := cli.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
close(donec)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("kv.Get took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVDeleteRange(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
ctx := context.TODO()
|
||||
|
||||
tests := []struct {
|
||||
key string
|
||||
opts []clientv3.OpOption
|
||||
|
||||
wkeys []string
|
||||
}{
|
||||
// [a, c)
|
||||
{
|
||||
key: "a",
|
||||
opts: []clientv3.OpOption{clientv3.WithRange("c")},
|
||||
|
||||
wkeys: []string{"c", "c/abc", "d"},
|
||||
},
|
||||
// >= c
|
||||
{
|
||||
key: "c",
|
||||
opts: []clientv3.OpOption{clientv3.WithFromKey()},
|
||||
|
||||
wkeys: []string{"a", "b"},
|
||||
},
|
||||
// c*
|
||||
{
|
||||
key: "c",
|
||||
opts: []clientv3.OpOption{clientv3.WithPrefix()},
|
||||
|
||||
wkeys: []string{"a", "b", "d"},
|
||||
},
|
||||
// *
|
||||
{
|
||||
key: "\x00",
|
||||
opts: []clientv3.OpOption{clientv3.WithFromKey()},
|
||||
|
||||
wkeys: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
keySet := []string{"a", "b", "c", "c/abc", "d"}
|
||||
for j, key := range keySet {
|
||||
if _, err := kv.Put(ctx, key, ""); err != nil {
|
||||
t.Fatalf("#%d: couldn't put %q (%v)", j, key, err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := kv.Delete(ctx, tt.key, tt.opts...)
|
||||
if err != nil {
|
||||
t.Fatalf("#%d: couldn't delete range (%v)", i, err)
|
||||
}
|
||||
|
||||
resp, err := kv.Get(ctx, "a", clientv3.WithFromKey())
|
||||
if err != nil {
|
||||
t.Fatalf("#%d: couldn't get keys (%v)", i, err)
|
||||
}
|
||||
keys := []string{}
|
||||
for _, kv := range resp.Kvs {
|
||||
keys = append(keys, string(kv.Key))
|
||||
}
|
||||
if !reflect.DeepEqual(tt.wkeys, keys) {
|
||||
t.Errorf("#%d: resp.Kvs got %v, expected %v", i, keys, tt.wkeys)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVDelete(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
ctx := context.TODO()
|
||||
|
||||
presp, err := kv.Put(ctx, "foo", "")
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't put 'foo' (%v)", err)
|
||||
}
|
||||
if presp.Header.Revision != 2 {
|
||||
t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
|
||||
}
|
||||
resp, err := kv.Delete(ctx, "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't delete key (%v)", err)
|
||||
}
|
||||
if resp.Header.Revision != 3 {
|
||||
t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
|
||||
}
|
||||
gresp, err := kv.Get(ctx, "foo")
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't get key (%v)", err)
|
||||
}
|
||||
if len(gresp.Kvs) > 0 {
|
||||
t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVCompactError(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
ctx := context.TODO()
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
|
||||
t.Fatalf("couldn't put 'foo' (%v)", err)
|
||||
}
|
||||
}
|
||||
_, err := kv.Compact(ctx, 6)
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't compact 6 (%v)", err)
|
||||
}
|
||||
|
||||
_, err = kv.Compact(ctx, 6)
|
||||
if err != rpctypes.ErrCompacted {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
|
||||
}
|
||||
|
||||
_, err = kv.Compact(ctx, 100)
|
||||
if err != rpctypes.ErrFutureRev {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestKVCompact(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
ctx := context.TODO()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
|
||||
t.Fatalf("couldn't put 'foo' (%v)", err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := kv.Compact(ctx, 7)
|
||||
if err != nil {
|
||||
t.Fatalf("couldn't compact kv space (%v)", err)
|
||||
}
|
||||
_, err = kv.Compact(ctx, 7)
|
||||
if err == nil || err != rpctypes.ErrCompacted {
|
||||
t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted)
|
||||
}
|
||||
|
||||
wcli := clus.RandClient()
|
||||
// new watcher could precede receiving the compaction without quorum first
|
||||
wcli.Get(ctx, "quorum-get")
|
||||
|
||||
wchan := wcli.Watch(ctx, "foo", clientv3.WithRev(3))
|
||||
|
||||
if wr := <-wchan; wr.CompactRevision != 7 {
|
||||
t.Fatalf("wchan CompactRevision got %v, want 7", wr.CompactRevision)
|
||||
}
|
||||
if wr, ok := <-wchan; ok {
|
||||
t.Fatalf("wchan got %v, expected closed", wr)
|
||||
}
|
||||
|
||||
_, err = kv.Compact(ctx, 1000)
|
||||
if err == nil || err != rpctypes.ErrFutureRev {
|
||||
t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev)
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVGetRetry ensures get will retry on disconnect.
|
||||
func TestKVGetRetry(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clusterSize := 3
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// because killing leader and following election
|
||||
// could give no other endpoints for client reconnection
|
||||
fIdx := (clus.WaitLeader(t) + 1) % clusterSize
|
||||
|
||||
kv := clus.Client(fIdx)
|
||||
ctx := context.TODO()
|
||||
|
||||
if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
clus.Members[fIdx].Stop(t)
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
// Get will fail, but reconnect will trigger
|
||||
gresp, gerr := kv.Get(ctx, "foo")
|
||||
if gerr != nil {
|
||||
t.Fatal(gerr)
|
||||
}
|
||||
wkvs := []*mvccpb.KeyValue{
|
||||
{
|
||||
Key: []byte("foo"),
|
||||
Value: []byte("bar"),
|
||||
CreateRevision: 2,
|
||||
ModRevision: 2,
|
||||
Version: 1,
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(gresp.Kvs, wkvs) {
|
||||
t.Fatalf("bad get: got %v, want %v", gresp.Kvs, wkvs)
|
||||
}
|
||||
donec <- struct{}{}
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
clus.Members[fIdx].Restart(t)
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("timed out waiting for get")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVPutFailGetRetry ensures a get will retry following a failed put.
|
||||
func TestKVPutFailGetRetry(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.Client(0)
|
||||
clus.Members[0].Stop(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
|
||||
defer cancel()
|
||||
_, err := kv.Put(ctx, "foo", "bar")
|
||||
if err == nil {
|
||||
t.Fatalf("got success on disconnected put, wanted error")
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
// Get will fail, but reconnect will trigger
|
||||
gresp, gerr := kv.Get(context.TODO(), "foo")
|
||||
if gerr != nil {
|
||||
t.Fatal(gerr)
|
||||
}
|
||||
if len(gresp.Kvs) != 0 {
|
||||
t.Fatalf("bad get kvs: got %+v, want empty", gresp.Kvs)
|
||||
}
|
||||
donec <- struct{}{}
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
clus.Members[0].Restart(t)
|
||||
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("timed out waiting for get")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVGetCancel tests that a context cancel on a Get terminates as expected.
|
||||
func TestKVGetCancel(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
oldconn := clus.Client(0).ActiveConnection()
|
||||
kv := clus.Client(0)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
cancel()
|
||||
|
||||
resp, err := kv.Get(ctx, "abc")
|
||||
if err == nil {
|
||||
t.Fatalf("cancel on get response %v, expected context error", resp)
|
||||
}
|
||||
newconn := clus.Client(0).ActiveConnection()
|
||||
if oldconn != newconn {
|
||||
t.Fatalf("cancel on get broke client connection")
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
|
||||
func TestKVGetStoppedServerAndClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.Members[0].Stop(t)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
|
||||
// this Get fails and triggers an asynchronous connection retry
|
||||
_, err := cli.Get(ctx, "abc")
|
||||
cancel()
|
||||
if !strings.Contains(err.Error(), "context deadline") {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
|
||||
func TestKVPutStoppedServerAndClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.Members[0].Stop(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
|
||||
// get retries on all errors.
|
||||
// so here we use it to eat the potential broken pipe error for the next put.
|
||||
// grpc client might see a broken pipe error when we issue the get request before
|
||||
// grpc finds out the original connection is down due to the member shutdown.
|
||||
_, err := cli.Get(ctx, "abc")
|
||||
cancel()
|
||||
if !strings.Contains(err.Error(), "context deadline") {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// this Put fails and triggers an asynchronous connection retry
|
||||
_, err = cli.Put(ctx, "abc", "123")
|
||||
cancel()
|
||||
if !strings.Contains(err.Error(), "context deadline") {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestKVGetOneEndpointDown ensures a client can connect and get if one endpoint is down
|
||||
func TestKVPutOneEndpointDown(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// get endpoint list
|
||||
eps := make([]string, 3)
|
||||
for i := range eps {
|
||||
eps[i] = clus.Members[i].GRPCAddr()
|
||||
}
|
||||
|
||||
// make a dead node
|
||||
clus.Members[rand.Intn(len(eps))].Stop(t)
|
||||
|
||||
// try to connect with dead node in the endpoint list
|
||||
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
|
||||
cli, err := clientv3.New(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
|
||||
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cancel()
|
||||
}
|
||||
|
||||
// TestKVGetResetLoneEndpoint ensures that if an endpoint resets and all other
|
||||
// endpoints are down, then it will reconnect.
|
||||
func TestKVGetResetLoneEndpoint(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// get endpoint list
|
||||
eps := make([]string, 2)
|
||||
for i := range eps {
|
||||
eps[i] = clus.Members[i].GRPCAddr()
|
||||
}
|
||||
|
||||
cfg := clientv3.Config{Endpoints: eps, DialTimeout: 500 * time.Millisecond}
|
||||
cli, err := clientv3.New(cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// disconnect everything
|
||||
clus.Members[0].Stop(t)
|
||||
clus.Members[1].Stop(t)
|
||||
|
||||
// have Get try to reconnect
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
|
||||
if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cancel()
|
||||
close(donec)
|
||||
}()
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
clus.Members[0].Restart(t)
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatalf("timed out waiting for Get")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
761 vendor/github.com/coreos/etcd/clientv3.old/integration/lease_test.go generated vendored Normal file
@ -0,0 +1,761 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/clientv3/concurrency"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"github.com/coreos/etcd/integration"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestLeaseNotFoundError(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
kv := clus.RandClient()
|
||||
|
||||
_, err := kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(500)))
|
||||
if err != rpctypes.ErrLeaseNotFound {
|
||||
t.Fatalf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseGrant(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
lapi := clus.RandClient()
|
||||
|
||||
kv := clus.RandClient()
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
|
||||
_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create key with lease %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseRevoke(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
lapi := clus.RandClient()
|
||||
|
||||
kv := clus.RandClient()
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
|
||||
_, err = lapi.Revoke(context.Background(), clientv3.LeaseID(resp.ID))
|
||||
if err != nil {
|
||||
t.Errorf("failed to revoke lease %v", err)
|
||||
}
|
||||
|
||||
_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
|
||||
if err != rpctypes.ErrLeaseNotFound {
|
||||
t.Fatalf("err = %v, want %v", err, rpctypes.ErrLeaseNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseKeepAliveOnce(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
lapi := clus.RandClient()
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
|
||||
_, err = lapi.KeepAliveOnce(context.Background(), resp.ID)
|
||||
if err != nil {
|
||||
t.Errorf("failed to keepalive lease %v", err)
|
||||
}
|
||||
|
||||
_, err = lapi.KeepAliveOnce(context.Background(), clientv3.LeaseID(0))
|
||||
if err != rpctypes.ErrLeaseNotFound {
|
||||
t.Errorf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseKeepAlive(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
lapi := clus.Client(0)
|
||||
clus.TakeClient(0)
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
|
||||
rc, kerr := lapi.KeepAlive(context.Background(), resp.ID)
|
||||
if kerr != nil {
|
||||
t.Errorf("failed to keepalive lease %v", kerr)
|
||||
}
|
||||
|
||||
kresp, ok := <-rc
|
||||
if !ok {
|
||||
t.Errorf("chan is closed, want not closed")
|
||||
}
|
||||
|
||||
if kresp.ID != resp.ID {
|
||||
t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||
}
|
||||
|
||||
lapi.Close()
|
||||
|
||||
_, ok = <-rc
|
||||
if ok {
|
||||
t.Errorf("chan is not closed, want lease Close() closes chan")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseKeepAliveOneSecond(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
|
||||
resp, err := cli.Grant(context.Background(), 1)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
|
||||
if kerr != nil {
|
||||
t.Errorf("failed to keepalive lease %v", kerr)
|
||||
}
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
if _, ok := <-rc; !ok {
|
||||
t.Errorf("chan is closed, want not closed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: add a client that can connect to all the members of cluster via unix sock.
|
||||
// TODO: test handle more complicated failures.
|
||||
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
|
||||
t.Skip("test it when we have a cluster client")
|
||||
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
// TODO: change this line to get a cluster client
|
||||
lapi := clus.RandClient()
|
||||
|
||||
resp, err := lapi.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Errorf("failed to create lease %v", err)
|
||||
}
|
||||
|
||||
rc, kerr := lapi.KeepAlive(context.Background(), resp.ID)
|
||||
if kerr != nil {
|
||||
t.Errorf("failed to keepalive lease %v", kerr)
|
||||
}
|
||||
|
||||
kresp := <-rc
|
||||
if kresp.ID != resp.ID {
|
||||
t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||
}
|
||||
|
||||
// restart the connected member.
|
||||
clus.Members[0].Stop(t)
|
||||
|
||||
select {
|
||||
case <-rc:
|
||||
t.Fatalf("unexpected keepalive")
|
||||
case <-time.After(10*time.Second/3 + 1):
|
||||
}
|
||||
|
||||
// recover the member.
|
||||
clus.Members[0].Restart(t)
|
||||
|
||||
kresp = <-rc
|
||||
if kresp.ID != resp.ID {
|
||||
t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||
}
|
||||
|
||||
lapi.Close()
|
||||
|
||||
_, ok := <-rc
|
||||
if ok {
|
||||
t.Errorf("chan is not closed, want lease Close() closes chan")
|
||||
}
|
||||
}
|
||||
|
||||
type leaseCh struct {
|
||||
lid clientv3.LeaseID
|
||||
ch <-chan *clientv3.LeaseKeepAliveResponse
|
||||
}
|
||||
|
||||
// TestLeaseKeepAliveNotFound ensures a revoked lease won't stop other keep alives
|
||||
func TestLeaseKeepAliveNotFound(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.RandClient()
|
||||
lchs := []leaseCh{}
|
||||
for i := 0; i < 3; i++ {
|
||||
resp, rerr := cli.Grant(context.TODO(), 5)
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
}
|
||||
kach, kaerr := cli.KeepAlive(context.Background(), resp.ID)
|
||||
if kaerr != nil {
|
||||
t.Fatal(kaerr)
|
||||
}
|
||||
lchs = append(lchs, leaseCh{resp.ID, kach})
|
||||
}
|
||||
|
||||
if _, err := cli.Revoke(context.TODO(), lchs[1].lid); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
<-lchs[0].ch
|
||||
if _, ok := <-lchs[0].ch; !ok {
|
||||
t.Fatalf("closed keepalive on wrong lease")
|
||||
}
|
||||
|
||||
timec := time.After(5 * time.Second)
|
||||
for range lchs[1].ch {
|
||||
select {
|
||||
case <-timec:
|
||||
t.Fatalf("revoke did not close keep alive")
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseGrantErrConnClosed(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.TakeClient(0)
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
_, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil && err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("le.Grant took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseGrantNewAfterClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.TakeClient(0)
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
if _, err := cli.Grant(context.TODO(), 5); err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
close(donec)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("le.Grant took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseRevokeNewAfterClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
resp, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
leaseID := resp.ID
|
||||
|
||||
clus.TakeClient(0)
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
if _, err := cli.Revoke(context.TODO(), leaseID); err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
close(donec)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("le.Revoke took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
// TestLeaseKeepAliveCloseAfterDisconnectExpire ensures the keep alive channel is closed
|
||||
// following a disconnection, lease revoke, then reconnect.
|
||||
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
|
||||
// setup lease and do a keepalive
|
||||
resp, err := cli.Grant(context.Background(), 10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
|
||||
if kerr != nil {
|
||||
t.Fatal(kerr)
|
||||
}
|
||||
kresp := <-rc
|
||||
if kresp.ID != resp.ID {
|
||||
t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
|
||||
}
|
||||
|
||||
// keep client disconnected
|
||||
clus.Members[0].Stop(t)
|
||||
time.Sleep(time.Second)
|
||||
clus.WaitLeader(t)
|
||||
|
||||
if _, err := clus.Client(1).Revoke(context.TODO(), resp.ID); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
clus.Members[0].Restart(t)
|
||||
|
||||
// some keep-alives may still be buffered; drain until close
|
||||
timer := time.After(time.Duration(kresp.TTL) * time.Second)
|
||||
for kresp != nil {
|
||||
select {
|
||||
case kresp = <-rc:
|
||||
case <-timer:
|
||||
t.Fatalf("keepalive channel did not close")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
// the initial keep alive request never gets a response.
func TestLeaseKeepAliveInitTimeout(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	// setup lease and do a keepalive
	resp, err := cli.Grant(context.Background(), 5)
	if err != nil {
		t.Fatal(err)
	}
	// keep client disconnected
	clus.Members[0].Stop(t)
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Fatal(kerr)
	}
	select {
	case ka, ok := <-rc:
		if ok {
			t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
		}
	case <-time.After(10 * time.Second):
		t.Fatalf("keepalive channel did not close")
	}

	clus.Members[0].Restart(t)
}

// TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if
// a keep alive request after the first never gets a response.
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	// setup lease and do a keepalive
	resp, err := cli.Grant(context.Background(), 5)
	if err != nil {
		t.Fatal(err)
	}
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Fatal(kerr)
	}
	if kresp := <-rc; kresp.ID != resp.ID {
		t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// keep client disconnected
	clus.Members[0].Stop(t)
	select {
	case ka, ok := <-rc:
		if ok {
			t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
		}
	case <-time.After(10 * time.Second):
		t.Fatalf("keepalive channel did not close")
	}

	clus.Members[0].Restart(t)
}

func TestLeaseTimeToLive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	c := clus.RandClient()
	lapi := c

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	kv := clus.RandClient()
	keys := []string{"foo1", "foo2"}
	for i := range keys {
		if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil {
			t.Fatal(err)
		}
	}

	// linearized read to ensure Puts propagated to server backing lapi
	if _, err := c.Get(context.TODO(), "abc"); err != nil {
		t.Fatal(err)
	}

	lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
	if lerr != nil {
		t.Fatal(lerr)
	}
	if lresp.ID != resp.ID {
		t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID)
	}
	if lresp.GrantedTTL != int64(10) {
		t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
	}
	if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL {
		t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL)
	}
	ks := make([]string, len(lresp.Keys))
	for i := range lresp.Keys {
		ks[i] = string(lresp.Keys[i])
	}
	sort.Strings(ks)
	if !reflect.DeepEqual(ks, keys) {
		t.Fatalf("keys expected %v, got %v", keys, ks)
	}

	lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID)
	if lerr != nil {
		t.Fatal(lerr)
	}
	if len(lresp.Keys) != 0 {
		t.Fatalf("unexpected keys %+v", lresp.Keys)
	}
}

func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.RandClient()
	resp, err := cli.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}
	_, err = cli.Revoke(context.Background(), resp.ID)
	if err != nil {
		t.Errorf("failed to Revoke lease %v", err)
	}

	lresp, err := cli.TimeToLive(context.Background(), resp.ID)
	// TimeToLive() doesn't return a LeaseNotFound error;
	// it returns a response with TTL set to -1.
	if err != nil {
		t.Fatalf("expected err to be nil")
	}
	if lresp == nil {
		t.Fatalf("expected lresp not to be nil")
	}
	if lresp.ResponseHeader == nil {
		t.Fatalf("expected ResponseHeader not to be nil")
	}
	if lresp.ID != resp.ID {
		t.Fatalf("expected Lease ID %v, but got %v", resp.ID, lresp.ID)
	}
	if lresp.TTL != -1 {
		t.Fatalf("expected TTL %v, but got %v", -1, lresp.TTL)
	}
}

// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
// for a while.
func TestLeaseRenewLostQuorum(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	r, err := cli.Grant(context.TODO(), 4)
	if err != nil {
		t.Fatal(err)
	}

	kctx, kcancel := context.WithCancel(context.Background())
	defer kcancel()
	ka, err := cli.KeepAlive(kctx, r.ID)
	if err != nil {
		t.Fatal(err)
	}
	// consume first keepalive so next message sends when cluster is down
	<-ka
	lastKa := time.Now()

	// force keepalive stream message to timeout
	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	// Use TTL-2 since the client closes the keepalive channel if no
	// keepalive arrives before the lease deadline; the client will
	// try to resend a keepalive after TTL/3 seconds, so for a TTL of 4,
	// sleeping for 2s should be sufficient time for issuing a retry.
	// The cluster has two seconds to recover and reply to the keepalive.
	time.Sleep(time.Duration(r.TTL-2) * time.Second)
	clus.Members[1].Restart(t)
	clus.Members[2].Restart(t)

	if time.Since(lastKa) > time.Duration(r.TTL)*time.Second {
		t.Skip("waited too long for server stop and restart")
	}

	select {
	case _, ok := <-ka:
		if !ok {
			t.Fatalf("keepalive closed")
		}
	case <-time.After(time.Duration(r.TTL) * time.Second):
		t.Fatalf("timed out waiting for keepalive")
	}
}

func TestLeaseKeepAliveLoopExit(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	ctx := context.Background()
	cli := clus.Client(0)
	clus.TakeClient(0)

	resp, err := cli.Grant(ctx, 5)
	if err != nil {
		t.Fatal(err)
	}
	cli.Close()

	_, err = cli.KeepAlive(ctx, resp.ID)
	if _, ok := err.(clientv3.ErrKeepAliveHalted); !ok {
		t.Fatalf("expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err)
	}
}

// TestV3LeaseFailureOverlap issues Grant and Keepalive requests to a cluster
// before, during, and after quorum loss to confirm Grant/Keepalive tolerates
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	numReqs := 5
	cli := clus.Client(0)

	// bring up a session, tear it down
	updown := func(i int) error {
		sess, err := concurrency.NewSession(cli)
		if err != nil {
			return err
		}
		ch := make(chan struct{})
		go func() {
			defer close(ch)
			sess.Close()
		}()
		select {
		case <-ch:
		case <-time.After(time.Minute / 4):
			t.Fatalf("timeout %d", i)
		}
		return nil
	}

	var wg sync.WaitGroup
	mkReqs := func(n int) {
		wg.Add(numReqs)
		for i := 0; i < numReqs; i++ {
			go func() {
				defer wg.Done()
				err := updown(n)
				if err == nil || err == rpctypes.ErrTimeoutDueToConnectionLost {
					return
				}
				t.Fatal(err)
			}()
		}
	}

	mkReqs(1)
	clus.Members[1].Stop(t)
	mkReqs(2)
	time.Sleep(time.Second)
	mkReqs(3)
	clus.Members[1].Restart(t)
	mkReqs(4)
	wg.Wait()
}

// TestLeaseWithRequireLeader checks keep-alive channel close when no leader.
func TestLeaseWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	c := clus.Client(0)
	lid1, err1 := c.Grant(context.TODO(), 60)
	if err1 != nil {
		t.Fatal(err1)
	}
	lid2, err2 := c.Grant(context.TODO(), 60)
	if err2 != nil {
		t.Fatal(err2)
	}
	// kaReqLeader closes if the leader is lost
	kaReqLeader, kerr1 := c.KeepAlive(clientv3.WithRequireLeader(context.TODO()), lid1.ID)
	if kerr1 != nil {
		t.Fatal(kerr1)
	}
	// kaWait will wait even if the leader is lost
	kaWait, kerr2 := c.KeepAlive(context.TODO(), lid2.ID)
	if kerr2 != nil {
		t.Fatal(kerr2)
	}

	select {
	case <-kaReqLeader:
	case <-time.After(5 * time.Second):
		t.Fatalf("require leader first keep-alive timed out")
	}
	select {
	case <-kaWait:
	case <-time.After(5 * time.Second):
		t.Fatalf("leader not required first keep-alive timed out")
	}

	clus.Members[1].Stop(t)
	// kaReqLeader may issue multiple requests while waiting for the first
	// response from proxy server; drain any stray keepalive responses
	time.Sleep(100 * time.Millisecond)
	for len(kaReqLeader) > 0 {
		<-kaReqLeader
	}

	select {
	case resp, ok := <-kaReqLeader:
		if ok {
			t.Fatalf("expected closed require leader, got response %+v", resp)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("keepalive with require leader took too long to close")
	}
	select {
	case _, ok := <-kaWait:
		if !ok {
			t.Fatalf("got closed channel with no require leader, expected non-closed")
		}
	case <-time.After(10 * time.Millisecond):
		// wait some to detect any closes happening soon after kaReqLeader closing
	}
}
21
vendor/github.com/coreos/etcd/clientv3.old/integration/logger_test.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import "github.com/coreos/pkg/capnslog"

func init() {
	capnslog.SetGlobalLogLevel(capnslog.INFO)
}
20
vendor/github.com/coreos/etcd/clientv3.old/integration/main_test.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package integration

import (
	"os"
	"testing"

	"github.com/coreos/etcd/pkg/testutil"
)

func TestMain(m *testing.M) {
	v := m.Run()
	if v == 0 && testutil.CheckLeakedGoroutine() {
		os.Exit(1)
	}
	os.Exit(v)
}
177
vendor/github.com/coreos/etcd/clientv3.old/integration/metrics_test.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"bufio"
	"io"
	"net"
	"net/http"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"github.com/coreos/etcd/pkg/transport"

	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func TestV3ClientMetrics(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		addr string = "localhost:27989"
		ln   net.Listener
		err  error
	)

	// listen for all prometheus metrics
	donec := make(chan struct{})
	go func() {
		defer close(donec)

		srv := &http.Server{Handler: prometheus.Handler()}
		srv.SetKeepAlivesEnabled(false)

		ln, err = transport.NewUnixListener(addr)
		if err != nil {
			t.Fatalf("Error: %v occurred while listening on addr: %v", err, addr)
		}

		err = srv.Serve(ln)
		if err != nil && !transport.IsClosedConnError(err) {
			t.Fatalf("Err serving http requests: %v", err)
		}
	}()

	url := "unix://" + addr + "/metrics"

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cfg := clientv3.Config{
		Endpoints: []string{clus.Members[0].GRPCAddr()},
		DialOptions: []grpc.DialOption{
			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
		},
	}
	cli, cerr := clientv3.New(cfg)
	if cerr != nil {
		t.Fatal(cerr)
	}
	defer cli.Close()

	wc := cli.Watch(context.Background(), "foo")

	wBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")

	pBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")

	_, err = cli.Put(context.Background(), "foo", "bar")
	if err != nil {
		t.Errorf("Error putting value in key store")
	}

	pAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")
	if pBefore+1 != pAfter {
		t.Errorf("grpc_client_started_total expected %d, got %d", 1, pAfter-pBefore)
	}

	// consume watch response
	select {
	case <-wc:
	case <-time.After(10 * time.Second):
		t.Error("Timeout occurred for getting watch response")
	}

	wAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")
	if wBefore+1 != wAfter {
		t.Errorf("grpc_client_msg_received_total expected %d, got %d", 1, wAfter-wBefore)
	}

	ln.Close()
	<-donec
}

func sumCountersForMetricAndLabels(t *testing.T, url string, metricName string, matchingLabelValues ...string) int {
	count := 0
	for _, line := range getHTTPBodyAsLines(t, url) {
		ok := true
		if !strings.HasPrefix(line, metricName) {
			continue
		}

		for _, labelValue := range matchingLabelValues {
			if !strings.Contains(line, `"`+labelValue+`"`) {
				ok = false
				break
			}
		}

		if !ok {
			continue
		}

		valueString := line[strings.LastIndex(line, " ")+1 : len(line)-1]
		valueFloat, err := strconv.ParseFloat(valueString, 32)
		if err != nil {
			t.Fatalf("failed parsing value for line: %v and matchingLabelValues: %v", line, matchingLabelValues)
		}
		count += int(valueFloat)
	}
	return count
}

func getHTTPBodyAsLines(t *testing.T, url string) []string {
	cfgtls := transport.TLSInfo{}
	tr, err := transport.NewTransport(cfgtls, time.Second)
	if err != nil {
		t.Fatalf("Error getting transport: %v", err)
	}

	tr.MaxIdleConns = -1
	tr.DisableKeepAlives = true

	cli := &http.Client{Transport: tr}

	resp, err := cli.Get(url)
	if err != nil {
		t.Fatalf("Error fetching: %v", err)
	}

	reader := bufio.NewReader(resp.Body)
	lines := []string{}
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			} else {
				t.Fatalf("error reading: %v", err)
			}
		}
		lines = append(lines, line)
	}
	resp.Body.Close()
	return lines
}
Some files were not shown because too many files have changed in this diff