[FIX] update etcdclient to release 3.2.0 and fix the bug that service regist

This commit is contained in:
parent 471964bc2d
commit 02a77bc182
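
The substance of the change, visible in the mq/mq.go hunks below, is twofold: the vendored etcd clientv3 is bumped to the 3.2.0 release (the previous copy is parked under clientv3.old/), and the message queue stops funneling Enqueue/Dequeue through one long-lived client, dialing a short-lived client per call instead. A minimal sketch of that per-call pattern; the endpoints parameter and the 5-second timeout are illustrative, not from the commit:

package mq

import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// enqueueOnce dials a fresh client, writes one key, and closes the
// connection; nothing is cached between calls, so a broken connection
// cannot poison later enqueues.
func enqueueOnce(ctx context.Context, endpoints []string, key, val string) error {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: 5 * time.Second, // illustrative timeout, not from the commit
	})
	if err != nil {
		return err
	}
	defer cli.Close() // always release the short-lived connection
	_, err = cli.Put(ctx, key, val)
	return err
}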
@@ -103,6 +103,7 @@ func (k *KeepAlive) Start() error {
 				}
 				logrus.Warnf("%s lid[%x] keepAlive err: %s, try to reset...", k.Endpoint, k.LID, err.Error())
+				k.LID = 0
 				timer.Reset(duration)
 			}()
 		} else {
 			if err := k.reg(); err != nil {
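
The one-line addition clears the cached lease ID whenever keepalive errors out, so the next pass through the loop re-registers under a fresh lease instead of renewing a dead one. A hedged sketch of that reset-and-regrant cycle; only Endpoint and LID appear in the hunk, while the etcdClient field, the "alive" value, and the 30-second TTL are assumptions:

package discover

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// keepAliveSketch is a trimmed-down stand-in for the KeepAlive type in the
// hunk above; etcdClient is a hypothetical field.
type keepAliveSketch struct {
	Endpoint   string
	LID        clientv3.LeaseID
	etcdClient *clientv3.Client
}

// keepAliveOnce renews the current lease, or grants a new one when LID was
// reset to zero after a failure.
func (k *keepAliveSketch) keepAliveOnce(ctx context.Context) error {
	if k.LID == 0 {
		// no live lease: grant a new one and re-register the endpoint
		resp, err := k.etcdClient.Grant(ctx, 30) // assumed 30s TTL
		if err != nil {
			return err
		}
		k.LID = resp.ID
		_, err = k.etcdClient.Put(ctx, k.Endpoint, "alive", clientv3.WithLease(k.LID))
		return err
	}
	if _, err := k.etcdClient.KeepAliveOnce(ctx, k.LID); err != nil {
		k.LID = 0 // the lease is gone; the next tick re-registers from scratch
		return err
	}
	return nil
}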
@@ -1,31 +1,31 @@

 // RAINBOND, Application Management Platform
 // Copyright (C) 2014-2017 Goodrain Co., Ltd.

 // This program is free software: you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
 // the Free Software Foundation, either version 3 of the License, or
 // (at your option) any later version. For any non-GPL usage of Rainbond,
 // one or multiple Commercial Licenses authorized by Goodrain Co., Ltd.
 // must be obtained first.

 // This program is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.

 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

 package mq

 import (
-	"github.com/goodrain/rainbond/cmd/mq/option"
 	"os"
 	"strings"
 	"sync"
 	"time"

+	"github.com/goodrain/rainbond/cmd/mq/option"

 	"golang.org/x/net/context"

 	etcdutil "github.com/goodrain/rainbond/pkg/util/etcd"
@@ -56,7 +56,6 @@ func NewActionMQ(ctx context.Context, c option.Config) ActionMQ {

 type etcdQueue struct {
 	config     option.Config
-	client     *clientv3.Client
 	ctx        context.Context
 	queues     map[string]string
 	queuesLock sync.Mutex
@@ -64,15 +63,6 @@ type etcdQueue struct {

 func (e *etcdQueue) Start() error {
 	logrus.Debug("etcd message queue client starting")
-	cli, err := clientv3.New(clientv3.Config{
-		Endpoints:   e.config.EtcdEndPoints,
-		DialTimeout: time.Duration(e.config.EtcdTimeout) * time.Second,
-	})
-	if err != nil {
-		etcdutil.HandleEtcdError(err)
-		return err
-	}
-	e.client = cli
 	topics := os.Getenv("topics")
 	if topics != "" {
 		ts := strings.Split(topics, ",")
@@ -108,20 +98,35 @@ func (e *etcdQueue) GetAllTopics() []string {
 }

 func (e *etcdQueue) Stop() error {
-	if e.client != nil {
-		return e.client.Close()
-	}
 	return nil
 }
 func (e *etcdQueue) queueKey(topic string) string {
 	return e.config.EtcdPrefix + "/" + topic
 }
 func (e *etcdQueue) Enqueue(ctx context.Context, topic, value string) error {
-	queue := etcdutil.NewQueue(e.client, e.queueKey(topic), ctx)
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   e.config.EtcdEndPoints,
+		DialTimeout: time.Duration(e.config.EtcdTimeout) * time.Second,
+	})
+	if err != nil {
+		etcdutil.HandleEtcdError(err)
+		return err
+	}
+	defer cli.Close()
+	queue := etcdutil.NewQueue(cli, e.queueKey(topic), ctx)
 	return queue.Enqueue(value)
 }

 func (e *etcdQueue) Dequeue(ctx context.Context, topic string) (string, error) {
-	queue := etcdutil.NewQueue(e.client, e.queueKey(topic), ctx)
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   e.config.EtcdEndPoints,
+		DialTimeout: time.Duration(e.config.EtcdTimeout) * time.Second,
+	})
+	if err != nil {
+		etcdutil.HandleEtcdError(err)
+		return "", err
+	}
+	defer cli.Close()
+	queue := etcdutil.NewQueue(cli, e.queueKey(topic), ctx)
 	return queue.Dequeue()
 }
@@ -79,7 +79,7 @@ func (c *Client) Post(key, val string, opts ...client.OpOption) (*client.PutResp
 	if !txnresp.Succeeded {
 		return nil, ErrKeyExists
 	}
-	return txnresp.ToOpResponse().Put(), nil
+	return txnresp.OpResponse().Put(), nil
 }

 //Put etcd v3 Put
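
This one-line change tracks an accessor rename in the updated vendored client: the transaction response exposes OpResponse(), not ToOpResponse(). For context, a hedged sketch of the create-only put that Post implements; ErrKeyExists is from the hunk, while the CreateRevision == 0 guard is the usual idiom and an assumption here, since Post's If clause sits outside the hunk:

package etcd

import (
	"errors"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

var errKeyExists = errors.New("key exists") // stand-in for the package's ErrKeyExists

// post writes key only if it has never been created; otherwise it reports
// an error instead of overwriting.
func post(ctx context.Context, cli *clientv3.Client, key, val string) (*clientv3.PutResponse, error) {
	txnresp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return nil, err
	}
	if !txnresp.Succeeded {
		return nil, errKeyExists
	}
	return txnresp.OpResponse().Put(), nil
}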
vendor/github.com/coreos/etcd/clientv3.old/auth.go (222 lines, generated, vendored, Normal file)
@@ -0,0 +1,222 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"fmt"
	"strings"

	"github.com/coreos/etcd/auth/authpb"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

type (
	AuthEnableResponse               pb.AuthEnableResponse
	AuthDisableResponse              pb.AuthDisableResponse
	AuthenticateResponse             pb.AuthenticateResponse
	AuthUserAddResponse              pb.AuthUserAddResponse
	AuthUserDeleteResponse           pb.AuthUserDeleteResponse
	AuthUserChangePasswordResponse   pb.AuthUserChangePasswordResponse
	AuthUserGrantRoleResponse        pb.AuthUserGrantRoleResponse
	AuthUserGetResponse              pb.AuthUserGetResponse
	AuthUserRevokeRoleResponse       pb.AuthUserRevokeRoleResponse
	AuthRoleAddResponse              pb.AuthRoleAddResponse
	AuthRoleGrantPermissionResponse  pb.AuthRoleGrantPermissionResponse
	AuthRoleGetResponse              pb.AuthRoleGetResponse
	AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
	AuthRoleDeleteResponse           pb.AuthRoleDeleteResponse
	AuthUserListResponse             pb.AuthUserListResponse
	AuthRoleListResponse             pb.AuthRoleListResponse

	PermissionType authpb.Permission_Type
	Permission     authpb.Permission
)

const (
	PermRead      = authpb.READ
	PermWrite     = authpb.WRITE
	PermReadWrite = authpb.READWRITE
)

type Auth interface {
	// AuthEnable enables auth of an etcd cluster.
	AuthEnable(ctx context.Context) (*AuthEnableResponse, error)

	// AuthDisable disables auth of an etcd cluster.
	AuthDisable(ctx context.Context) (*AuthDisableResponse, error)

	// UserAdd adds a new user to an etcd cluster.
	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)

	// UserDelete deletes a user from an etcd cluster.
	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)

	// UserChangePassword changes a password of a user.
	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)

	// UserGrantRole grants a role to a user.
	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)

	// UserGet gets a detailed information of a user.
	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)

	// UserList gets a list of all users.
	UserList(ctx context.Context) (*AuthUserListResponse, error)

	// UserRevokeRole revokes a role of a user.
	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)

	// RoleAdd adds a new role to an etcd cluster.
	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)

	// RoleGrantPermission grants a permission to a role.
	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)

	// RoleGet gets a detailed information of a role.
	RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)

	// RoleList gets a list of all roles.
	RoleList(ctx context.Context) (*AuthRoleListResponse, error)

	// RoleRevokePermission revokes a permission from a role.
	RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)

	// RoleDelete deletes a role.
	RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}

type auth struct {
	remote pb.AuthClient
}

func NewAuth(c *Client) Auth {
	return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
}

func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
	return (*AuthEnableResponse)(resp), toErr(ctx, err)
}

func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
	return (*AuthDisableResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
	return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
	resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
	return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
	resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
	return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
	resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
	return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
	return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
	return (*AuthUserListResponse)(resp), toErr(ctx, err)
}

func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
	resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
	return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
	resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
	return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
	perm := &authpb.Permission{
		Key:      []byte(key),
		RangeEnd: []byte(rangeEnd),
		PermType: authpb.Permission_Type(permType),
	}
	resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
	return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
	resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
	return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}

func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
	resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
	return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}

func StrToPermissionType(s string) (PermissionType, error) {
	val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
	if ok {
		return PermissionType(val), nil
	}
	return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}

type authenticator struct {
	conn   *grpc.ClientConn // conn in-use
	remote pb.AuthClient
}

func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
	return (*AuthenticateResponse)(resp), toErr(ctx, err)
}

func (auth *authenticator) close() {
	auth.conn.Close()
}

func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
	conn, err := grpc.Dial(endpoint, opts...)
	if err != nil {
		return nil, err
	}

	return &authenticator{
		conn:   conn,
		remote: pb.NewAuthClient(conn),
	}, nil
}
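
The Auth implementation above is wired into Client in client.go below (client.Auth = NewAuth(client)), so its methods are reachable directly on a connected client. An illustrative, hedged sketch of bootstrapping auth; the endpoint and password are placeholders, and the root user/role pairing follows etcd's documented requirement for AuthEnable:

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// Client embeds Auth, so UserAdd/RoleAdd/... are called on cli directly.
	if _, err := cli.UserAdd(ctx, "root", "secret"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}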
vendor/github.com/coreos/etcd/clientv3.old/client.go (526 lines, generated, vendored, Normal file)
@@ -0,0 +1,526 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/metadata"
)

var (
	ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
	ErrOldCluster           = errors.New("etcdclient: old cluster version")
)

// Client provides and manages an etcd v3 client session.
type Client struct {
	Cluster
	KV
	Lease
	Watcher
	Auth
	Maintenance

	conn     *grpc.ClientConn
	dialerrc chan error

	cfg              Config
	creds            *credentials.TransportCredentials
	balancer         *simpleBalancer
	retryWrapper     retryRpcFunc
	retryAuthWrapper retryRpcFunc

	ctx    context.Context
	cancel context.CancelFunc

	// Username is a username for authentication
	Username string
	// Password is a password for authentication
	Password string
	// tokenCred is an instance of WithPerRPCCredentials()'s argument
	tokenCred *authTokenCredential
}

// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
	if len(cfg.Endpoints) == 0 {
		return nil, ErrNoAvailableEndpoints
	}

	return newClient(&cfg)
}

// NewCtxClient creates a client with a context but no underlying grpc
// connection. This is useful for embedded cases that override the
// service interface implementations and do not need connection management.
func NewCtxClient(ctx context.Context) *Client {
	cctx, cancel := context.WithCancel(ctx)
	return &Client{ctx: cctx, cancel: cancel}
}

// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
	return New(Config{Endpoints: []string{url}})
}

// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
	c.cancel()
	c.Watcher.Close()
	c.Lease.Close()
	if c.conn != nil {
		return toErr(c.ctx, c.conn.Close())
	}
	return c.ctx.Err()
}

// Ctx is a context for "out of band" messages (e.g., for sending
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }

// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() (eps []string) {
	// copy the slice; protect original endpoints from being changed
	eps = make([]string, len(c.cfg.Endpoints))
	copy(eps, c.cfg.Endpoints)
	return
}

// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
	c.cfg.Endpoints = eps
	c.balancer.updateAddrs(eps)
}

// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
func (c *Client) Sync(ctx context.Context) error {
	mresp, err := c.MemberList(ctx)
	if err != nil {
		return err
	}
	var eps []string
	for _, m := range mresp.Members {
		eps = append(eps, m.ClientURLs...)
	}
	c.SetEndpoints(eps...)
	return nil
}

func (c *Client) autoSync() {
	if c.cfg.AutoSyncInterval == time.Duration(0) {
		return
	}

	for {
		select {
		case <-c.ctx.Done():
			return
		case <-time.After(c.cfg.AutoSyncInterval):
			ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
			if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
				logger.Println("Auto sync endpoints failed:", err)
			}
		}
	}
}

type authTokenCredential struct {
	token   string
	tokenMu *sync.RWMutex
}

func (cred authTokenCredential) RequireTransportSecurity() bool {
	return false
}

func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
	cred.tokenMu.RLock()
	defer cred.tokenMu.RUnlock()
	return map[string]string{
		"token": cred.token,
	}, nil
}

func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
	proto = "tcp"
	host = endpoint
	url, uerr := url.Parse(endpoint)
	if uerr != nil || !strings.Contains(endpoint, "://") {
		return
	}
	scheme = url.Scheme

	// strip scheme:// prefix since grpc dials by host
	host = url.Host
	switch url.Scheme {
	case "http", "https":
	case "unix", "unixs":
		proto = "unix"
		host = url.Host + url.Path
	default:
		proto, host = "", ""
	}
	return
}

func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
	creds = c.creds
	switch scheme {
	case "unix":
	case "http":
		creds = nil
	case "https", "unixs":
		if creds != nil {
			break
		}
		tlsconfig := &tls.Config{}
		emptyCreds := credentials.NewTLS(tlsconfig)
		creds = &emptyCreds
	default:
		creds = nil
	}
	return
}

// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
	if c.cfg.DialTimeout > 0 {
		opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
	}
	if c.cfg.DialKeepAliveTime > 0 {
		params := keepalive.ClientParameters{
			Time: c.cfg.DialKeepAliveTime,
		}
		// Only relevant when KeepAliveTime is non-zero
		if c.cfg.DialKeepAliveTimeout > 0 {
			params.Timeout = c.cfg.DialKeepAliveTimeout
		}
		opts = append(opts, grpc.WithKeepaliveParams(params))
	}
	opts = append(opts, dopts...)

	f := func(host string, t time.Duration) (net.Conn, error) {
		proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
		if host == "" && endpoint != "" {
			// dialing an endpoint not in the balancer; use
			// endpoint passed into dial
			proto, host, _ = parseEndpoint(endpoint)
		}
		if proto == "" {
			return nil, fmt.Errorf("unknown scheme for %q", host)
		}
		select {
		case <-c.ctx.Done():
			return nil, c.ctx.Err()
		default:
		}
		dialer := &net.Dialer{Timeout: t}
		conn, err := dialer.DialContext(c.ctx, proto, host)
		if err != nil {
			select {
			case c.dialerrc <- err:
			default:
			}
		}
		return conn, err
	}
	opts = append(opts, grpc.WithDialer(f))

	creds := c.creds
	if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
		creds = c.processCreds(scheme)
	}
	if creds != nil {
		opts = append(opts, grpc.WithTransportCredentials(*creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	return opts
}

// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
	return c.dial(endpoint)
}

func (c *Client) getToken(ctx context.Context) error {
	var err error // return last error in a case of fail
	var auth *authenticator

	for i := 0; i < len(c.cfg.Endpoints); i++ {
		endpoint := c.cfg.Endpoints[i]
		host := getHost(endpoint)
		// use dial options without dopts to avoid reusing the client balancer
		auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
		if err != nil {
			continue
		}
		defer auth.close()

		var resp *AuthenticateResponse
		resp, err = auth.authenticate(ctx, c.Username, c.Password)
		if err != nil {
			continue
		}

		c.tokenCred.tokenMu.Lock()
		c.tokenCred.token = resp.Token
		c.tokenCred.tokenMu.Unlock()

		return nil
	}

	return err
}

func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
	opts := c.dialSetupOpts(endpoint, dopts...)
	host := getHost(endpoint)
	if c.Username != "" && c.Password != "" {
		c.tokenCred = &authTokenCredential{
			tokenMu: &sync.RWMutex{},
		}

		ctx := c.ctx
		if c.cfg.DialTimeout > 0 {
			cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
			defer cancel()
			ctx = cctx
		}

		err := c.getToken(ctx)
		if err != nil {
			if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
				if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
					err = grpc.ErrClientConnTimeout
				}
				return nil, err
			}
		} else {
			opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
		}
	}

	opts = append(opts, c.cfg.DialOptions...)

	conn, err := grpc.DialContext(c.ctx, host, opts...)
	if err != nil {
		return nil, err
	}
	return conn, nil
}

// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	return metadata.NewOutgoingContext(ctx, md)
}

func newClient(cfg *Config) (*Client, error) {
	if cfg == nil {
		cfg = &Config{}
	}
	var creds *credentials.TransportCredentials
	if cfg.TLS != nil {
		c := credentials.NewTLS(cfg.TLS)
		creds = &c
	}

	// use a temporary skeleton client to bootstrap first connection
	baseCtx := context.TODO()
	if cfg.Context != nil {
		baseCtx = cfg.Context
	}

	ctx, cancel := context.WithCancel(baseCtx)
	client := &Client{
		conn:     nil,
		dialerrc: make(chan error, 1),
		cfg:      *cfg,
		creds:    creds,
		ctx:      ctx,
		cancel:   cancel,
	}
	if cfg.Username != "" && cfg.Password != "" {
		client.Username = cfg.Username
		client.Password = cfg.Password
	}

	client.balancer = newSimpleBalancer(cfg.Endpoints)
	// use Endpoints[0] so that for https:// without any tls config given, then
	// grpc will assume the ServerName is in the endpoint.
	conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
	if err != nil {
		client.cancel()
		client.balancer.Close()
		return nil, err
	}
	client.conn = conn
	client.retryWrapper = client.newRetryWrapper()
	client.retryAuthWrapper = client.newAuthRetryWrapper()

	// wait for a connection
	if cfg.DialTimeout > 0 {
		hasConn := false
		waitc := time.After(cfg.DialTimeout)
		select {
		case <-client.balancer.readyc:
			hasConn = true
		case <-ctx.Done():
		case <-waitc:
		}
		if !hasConn {
			err := grpc.ErrClientConnTimeout
			select {
			case err = <-client.dialerrc:
			default:
			}
			client.cancel()
			client.balancer.Close()
			conn.Close()
			return nil, err
		}
	}

	client.Cluster = NewCluster(client)
	client.KV = NewKV(client)
	client.Lease = NewLease(client)
	client.Watcher = NewWatcher(client)
	client.Auth = NewAuth(client)
	client.Maintenance = NewMaintenance(client)

	if cfg.RejectOldCluster {
		if err := client.checkVersion(); err != nil {
			client.Close()
			return nil, err
		}
	}

	go client.autoSync()
	return client, nil
}

func (c *Client) checkVersion() (err error) {
	var wg sync.WaitGroup
	errc := make(chan error, len(c.cfg.Endpoints))
	ctx, cancel := context.WithCancel(c.ctx)
	if c.cfg.DialTimeout > 0 {
		ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
	}
	wg.Add(len(c.cfg.Endpoints))
	for _, ep := range c.cfg.Endpoints {
		// if cluster is current, any endpoint gives a recent version
		go func(e string) {
			defer wg.Done()
			resp, rerr := c.Status(ctx, e)
			if rerr != nil {
				errc <- rerr
				return
			}
			vs := strings.Split(resp.Version, ".")
			maj, min := 0, 0
			if len(vs) >= 2 {
				maj, rerr = strconv.Atoi(vs[0])
				min, rerr = strconv.Atoi(vs[1])
			}
			if maj < 3 || (maj == 3 && min < 2) {
				rerr = ErrOldCluster
			}
			errc <- rerr
		}(ep)
	}
	// wait for success
	for i := 0; i < len(c.cfg.Endpoints); i++ {
		if err = <-errc; err == nil {
			break
		}
	}
	cancel()
	wg.Wait()
	return err
}

// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }

// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
	if ctx != nil && ctx.Err() != nil {
		return true
	}
	if err == nil {
		return false
	}
	code := grpc.Code(err)
	// Unavailable codes mean the system will be right back.
	// (e.g., can't connect, lost leader)
	// Treat Internal codes as if something failed, leaving the
	// system in an inconsistent state, but retrying could make progress.
	// (e.g., failed in middle of send, corrupted frame)
	// TODO: are permanent Internal errors possible from grpc?
	return code != codes.Unavailable && code != codes.Internal
}

func toErr(ctx context.Context, err error) error {
	if err == nil {
		return nil
	}
	err = rpctypes.Error(err)
	if _, ok := err.(rpctypes.EtcdError); ok {
		return err
	}
	code := grpc.Code(err)
	switch code {
	case codes.DeadlineExceeded:
		fallthrough
	case codes.Canceled:
		if ctx.Err() != nil {
			err = ctx.Err()
		}
	case codes.Unavailable:
		err = ErrNoAvailableEndpoints
	case codes.FailedPrecondition:
		err = grpc.ErrClientConnClosing
	}
	return err
}

func canceledByCaller(stopCtx context.Context, err error) bool {
	if stopCtx.Err() == nil || err == nil {
		return false
	}

	return err == context.Canceled || err == context.DeadlineExceeded
}
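
Two details of client.go are worth pulling out: New fails fast with ErrNoAvailableEndpoints on an empty endpoint list, and WithRequireLeader tags a context so requests are rejected while the cluster has no leader. A hedged usage sketch, with a placeholder endpoint:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	// Dial with a bounded timeout; on a blackholed endpoint this surfaces
	// grpc.ErrClientConnTimeout, as exercised in client_test.go below.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Reads on this context fail unless the cluster currently has a leader.
	ctx := clientv3.WithRequireLeader(context.Background())
	if _, err := cli.Get(ctx, "foo"); err != nil {
		log.Fatal(err)
	}
}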
vendor/github.com/coreos/etcd/clientv3.old/client_test.go (154 lines, generated, vendored, Normal file)
@@ -0,0 +1,154 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"fmt"
	"net"
	"testing"
	"time"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func TestDialCancel(t *testing.T) {
	defer testutil.AfterTest(t)

	// accept first connection so client is created with dial timeout
	ln, err := net.Listen("unix", "dialcancel:12345")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	ep := "unix://dialcancel:12345"
	cfg := Config{
		Endpoints:   []string{ep},
		DialTimeout: 30 * time.Second}
	c, err := New(cfg)
	if err != nil {
		t.Fatal(err)
	}

	// connect to ipv4 blackhole so dial blocks
	c.SetEndpoints("http://254.0.0.1:12345")

	// issue Get to force redial attempts
	getc := make(chan struct{})
	go func() {
		defer close(getc)
		// Get may hang forever on grpc's Stream.Header() if its
		// context is never canceled.
		c.Get(c.Ctx(), "abc")
	}()

	// wait a little bit so client close is after dial starts
	time.Sleep(100 * time.Millisecond)

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		c.Close()
	}()

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("failed to close")
	case <-donec:
	}
	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("get failed to exit")
	case <-getc:
	}
}

func TestDialTimeout(t *testing.T) {
	defer testutil.AfterTest(t)

	testCfgs := []Config{
		{
			Endpoints:   []string{"http://254.0.0.1:12345"},
			DialTimeout: 2 * time.Second,
		},
		{
			Endpoints:   []string{"http://254.0.0.1:12345"},
			DialTimeout: time.Second,
			Username:    "abc",
			Password:    "def",
		},
	}

	for i, cfg := range testCfgs {
		donec := make(chan error)
		go func() {
			// without timeout, dial continues forever on ipv4 blackhole
			c, err := New(cfg)
			if c != nil || err == nil {
				t.Errorf("#%d: new client should fail", i)
			}
			donec <- err
		}()

		time.Sleep(10 * time.Millisecond)

		select {
		case err := <-donec:
			t.Errorf("#%d: dial didn't wait (%v)", i, err)
		default:
		}

		select {
		case <-time.After(5 * time.Second):
			t.Errorf("#%d: failed to timeout dial on time", i)
		case err := <-donec:
			if err != grpc.ErrClientConnTimeout {
				t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
			}
		}
	}
}

func TestDialNoTimeout(t *testing.T) {
	cfg := Config{Endpoints: []string{"127.0.0.1:12345"}}
	c, err := New(cfg)
	if c == nil || err != nil {
		t.Fatalf("new client with DialNoWait should succeed, got %v", err)
	}
	c.Close()
}

func TestIsHaltErr(t *testing.T) {
	if !isHaltErr(nil, fmt.Errorf("etcdserver: some etcdserver error")) {
		t.Errorf(`error prefixed with "etcdserver: " should be Halted by default`)
	}
	if isHaltErr(nil, rpctypes.ErrGRPCStopped) {
		t.Errorf("error %v should not halt", rpctypes.ErrGRPCStopped)
	}
	if isHaltErr(nil, rpctypes.ErrGRPCNoLeader) {
		t.Errorf("error %v should not halt", rpctypes.ErrGRPCNoLeader)
	}
	ctx, cancel := context.WithCancel(context.TODO())
	if isHaltErr(ctx, nil) {
		t.Errorf("no error and active context should not be Halted")
	}
	cancel()
	if !isHaltErr(ctx, nil) {
		t.Errorf("cancel on context should be Halted")
	}
}
vendor/github.com/coreos/etcd/clientv3.old/clientv3util/example_key_test.go (66 lines, generated, vendored, Normal file)
@@ -0,0 +1,66 @@

// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3util_test

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/clientv3util"
)

func ExampleKeyExists_put() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints: []string{"127.0.0.1:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	kvc := clientv3.NewKV(cli)

	// perform a put only if key is missing
	// It is useful to do the check (transactionally) to avoid overwriting
	// the existing key which would generate potentially unwanted events,
	// unless of course you wanted to do an overwrite no matter what.
	_, err = kvc.Txn(context.Background()).
		If(clientv3util.KeyMissing("purpleidea")).
		Then(clientv3.OpPut("purpleidea", "hello world")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
}

func ExampleKeyExists_delete() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints: []string{"127.0.0.1:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	kvc := clientv3.NewKV(cli)

	// perform a delete only if key already exists
	_, err = kvc.Txn(context.Background()).
		If(clientv3util.KeyExists("purpleidea")).
		Then(clientv3.OpDelete("purpleidea")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
}
vendor/github.com/coreos/etcd/clientv3.old/clientv3util/util.go (33 lines, generated, vendored, Normal file)
@@ -0,0 +1,33 @@

// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package clientv3util contains utility functions derived from clientv3.
package clientv3util

import (
	"github.com/coreos/etcd/clientv3"
)

// KeyExists returns a comparison operation that evaluates to true iff the given
// key exists. It does this by checking if the key `Version` is greater than 0.
// It is a useful guard in transaction delete operations.
func KeyExists(key string) clientv3.Cmp {
	return clientv3.Compare(clientv3.Version(key), ">", 0)
}

// KeyMissing returns a comparison operation that evaluates to true iff the
// given key does not exist.
func KeyMissing(key string) clientv3.Cmp {
	return clientv3.Compare(clientv3.Version(key), "=", 0)
}
vendor/github.com/coreos/etcd/clientv3.old/cluster.go (100 lines, generated, vendored, Normal file)
@@ -0,0 +1,100 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

type (
	Member               pb.Member
	MemberListResponse   pb.MemberListResponse
	MemberAddResponse    pb.MemberAddResponse
	MemberRemoveResponse pb.MemberRemoveResponse
	MemberUpdateResponse pb.MemberUpdateResponse
)

type Cluster interface {
	// MemberList lists the current cluster membership.
	MemberList(ctx context.Context) (*MemberListResponse, error)

	// MemberAdd adds a new member into the cluster.
	MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)

	// MemberRemove removes an existing member from the cluster.
	MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)

	// MemberUpdate updates the peer addresses of the member.
	MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
}

type cluster struct {
	remote pb.ClusterClient
}

func NewCluster(c *Client) Cluster {
	return &cluster{remote: RetryClusterClient(c)}
}

func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
	return &cluster{remote: remote}
}

func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
	resp, err := c.remote.MemberAdd(ctx, r)
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*MemberAddResponse)(resp), nil
}

func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
	r := &pb.MemberRemoveRequest{ID: id}
	resp, err := c.remote.MemberRemove(ctx, r)
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*MemberRemoveResponse)(resp), nil
}

func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
	// it is safe to retry on update.
	for {
		r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
		resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
		if err == nil {
			return (*MemberUpdateResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
	// it is safe to retry on list.
	for {
		resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
		if err == nil {
			return (*MemberListResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}
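
Cluster's MemberList and MemberUpdate retry internally while isHaltErr reports that progress is still possible, and Sync in client.go builds directly on MemberList. A hedged sketch of driving membership through the embedded interface; the added peer URL is a placeholder:

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// printAndGrow lists the current membership, then adds one member.
func printAndGrow(cli *clientv3.Client) error {
	resp, err := cli.MemberList(context.Background())
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %s %v\n", m.ID, m.Name, m.ClientURLs)
	}
	// MemberAdd takes peer URLs (the :2380 side), not client URLs.
	_, err = cli.MemberAdd(context.Background(), []string{"http://10.0.0.4:2380"})
	return err
}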
vendor/github.com/coreos/etcd/clientv3.old/compact_op.go (53 lines, generated, vendored, Normal file)
@@ -0,0 +1,53 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// CompactOp represents a compact operation.
type CompactOp struct {
	revision int64
	physical bool
}

// CompactOption configures compact operation.
type CompactOption func(*CompactOp)

func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
	for _, opt := range opts {
		opt(op)
	}
}

// OpCompact wraps slice CompactOption to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
	ret := CompactOp{revision: rev}
	ret.applyCompactOpts(opts)
	return ret
}

func (op CompactOp) toRequest() *pb.CompactionRequest {
	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}

// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
func WithCompactPhysical() CompactOption {
	return func(op *CompactOp) { op.physical = true }
}
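
CompactOp only carries a revision plus the physical flag and is consumed through the KV Compact call. A short hedged sketch; rev would typically come from a response header elsewhere:

package main

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// compactTo discards key history up to rev and, because of
// WithCompactPhysical, returns only after the backend has physically
// purged the compacted entries.
func compactTo(cli *clientv3.Client, rev int64) error {
	_, err := cli.Compact(context.Background(), rev, clientv3.WithCompactPhysical())
	return err
}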
vendor/github.com/coreos/etcd/clientv3.old/compact_op_test.go (30 lines, generated, vendored, Normal file)
@@ -0,0 +1,30 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"reflect"
	"testing"

	"github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func TestCompactOp(t *testing.T) {
	req1 := OpCompact(100, WithCompactPhysical()).toRequest()
	req2 := &etcdserverpb.CompactionRequest{Revision: 100, Physical: true}
	if !reflect.DeepEqual(req1, req2) {
		t.Fatalf("expected %+v, got %+v", req2, req1)
	}
}
vendor/github.com/coreos/etcd/clientv3.old/compare.go (122 lines, generated, vendored, Normal file)
@@ -0,0 +1,122 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

type CompareTarget int
type CompareResult int

const (
	CompareVersion CompareTarget = iota
	CompareCreated
	CompareModified
	CompareValue
)

type Cmp pb.Compare

func Compare(cmp Cmp, result string, v interface{}) Cmp {
	var r pb.Compare_CompareResult

	switch result {
	case "=":
		r = pb.Compare_EQUAL
	case "!=":
		r = pb.Compare_NOT_EQUAL
	case ">":
		r = pb.Compare_GREATER
	case "<":
		r = pb.Compare_LESS
	default:
		panic("Unknown result op")
	}

	cmp.Result = r
	switch cmp.Target {
	case pb.Compare_VALUE:
		val, ok := v.(string)
		if !ok {
			panic("bad compare value")
		}
		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
	case pb.Compare_VERSION:
		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
	case pb.Compare_CREATE:
		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
	case pb.Compare_MOD:
		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
	default:
		panic("Unknown compare type")
	}
	return cmp
}

func Value(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
}

func Version(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
}

func CreateRevision(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
}

func ModRevision(key string) Cmp {
	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}

// KeyBytes returns the byte slice holding with the comparison key.
func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }

// WithKeyBytes sets the byte slice for the comparison key.
func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }

// ValueBytes returns the byte slice holding the comparison value, if any.
func (cmp *Cmp) ValueBytes() []byte {
	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
		return tu.Value
	}
	return nil
}

// WithValueBytes sets the byte slice for the comparison's value.
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }

// WithRange sets the comparison to scan the range [key, end).
func (cmp Cmp) WithRange(end string) Cmp {
	cmp.RangeEnd = []byte(end)
	return cmp
}

// WithPrefix sets the comparison to scan all keys prefixed by the key.
func (cmp Cmp) WithPrefix() Cmp {
	cmp.RangeEnd = getPrefix(cmp.Key)
	return cmp
}

func mustInt64(val interface{}) int64 {
	if v, ok := val.(int64); ok {
		return v
	}
	if v, ok := val.(int); ok {
		return int64(v)
	}
	panic("bad value")
}
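
The Cmp builders above pair with the "=", "!=", ">", "<" result strings to form transaction guards; clientv3util's KeyExists and KeyMissing earlier in this diff are thin wrappers over exactly this mechanism. A hedged compare-and-swap sketch:

package main

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// cas updates key only if its current value is still oldVal; Succeeded
// reports whether the guard held.
func cas(cli *clientv3.Client, key, oldVal, newVal string) (bool, error) {
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value(key), "=", oldVal)).
		Then(clientv3.OpPut(key, newVal)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}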
vendor/github.com/coreos/etcd/clientv3.old/concurrency/doc.go (17 lines, generated, vendored, Normal file)
@@ -0,0 +1,17 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package concurrency implements concurrency operations on top of
// etcd such as distributed locks, barriers, and elections.
package concurrency
vendor/github.com/coreos/etcd/clientv3.old/concurrency/election.go (243 lines, generated, vendored, Normal file)
@@ -0,0 +1,243 @@

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"errors"
	"fmt"

	v3 "github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"golang.org/x/net/context"
)

var (
	ErrElectionNotLeader = errors.New("election: not leader")
	ErrElectionNoLeader  = errors.New("election: no leader")
)

type Election struct {
	session *Session

	keyPrefix string

	leaderKey     string
	leaderRev     int64
	leaderSession *Session
	hdr           *pb.ResponseHeader
}

// NewElection returns a new election on a given key prefix.
func NewElection(s *Session, pfx string) *Election {
	return &Election{session: s, keyPrefix: pfx + "/"}
}

// ResumeElection initializes an election with a known leader.
func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
	return &Election{
		session:       s,
		leaderKey:     leaderKey,
		leaderRev:     leaderRev,
		leaderSession: s,
	}
}

// Campaign puts a value as eligible for the election. It blocks until
// it is elected, an error occurs, or the context is cancelled.
func (e *Election) Campaign(ctx context.Context, val string) error {
	s := e.session
	client := e.session.Client()

	k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
	txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
	txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
	txn = txn.Else(v3.OpGet(k))
	resp, err := txn.Commit()
	if err != nil {
		return err
	}
	e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
	if !resp.Succeeded {
		kv := resp.Responses[0].GetResponseRange().Kvs[0]
		e.leaderRev = kv.CreateRevision
		if string(kv.Value) != val {
			if err = e.Proclaim(ctx, val); err != nil {
				e.Resign(ctx)
				return err
			}
		}
	}

	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
	if err != nil {
		// clean up in case of context cancel
		select {
		case <-ctx.Done():
			e.Resign(client.Ctx())
		default:
			e.leaderSession = nil
		}
		return err
	}
	e.hdr = resp.Header

	return nil
}

// Proclaim lets the leader announce a new value without another election.
func (e *Election) Proclaim(ctx context.Context, val string) error {
	if e.leaderSession == nil {
		return ErrElectionNotLeader
	}
	client := e.session.Client()
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	txn := client.Txn(ctx).If(cmp)
	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
	tresp, terr := txn.Commit()
	if terr != nil {
		return terr
	}
	if !tresp.Succeeded {
		e.leaderKey = ""
		return ErrElectionNotLeader
	}

	e.hdr = tresp.Header
	return nil
}

// Resign lets a leader start a new election.
func (e *Election) Resign(ctx context.Context) (err error) {
	if e.leaderSession == nil {
		return nil
	}
	client := e.session.Client()
	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
	if err == nil {
		e.hdr = resp.Header
	}
	e.leaderKey = ""
	e.leaderSession = nil
	return err
}

// Leader returns the leader value for the current election.
func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
	client := e.session.Client()
	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
	if err != nil {
		return nil, err
	} else if len(resp.Kvs) == 0 {
		// no leader currently elected
		return nil, ErrElectionNoLeader
	}
	return resp, nil
}

// Observe returns a channel that reliably observes ordered leader proposals
// as GetResponse values on every current elected leader key. It will not
// necessarily fetch all historical leader updates, but will always post the
// most recent leader value.
//
// The channel closes when the context is canceled or the underlying watcher
// is otherwise disrupted.
func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
	retc := make(chan v3.GetResponse)
	go e.observe(ctx, retc)
	return retc
}

func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
	client := e.session.Client()

	defer close(ch)
	for {
		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
		if err != nil {
			return
		}

		var kv *mvccpb.KeyValue
		var hdr *pb.ResponseHeader

		if len(resp.Kvs) == 0 {
			cctx, cancel := context.WithCancel(ctx)
			// wait for first key put on prefix
			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
			wch := client.Watch(cctx, e.keyPrefix, opts...)
			for kv == nil {
				wr, ok := <-wch
				if !ok || wr.Err() != nil {
					cancel()
					return
				}
				// only accept PUTs; a DELETE will make observe() spin
				for _, ev := range wr.Events {
					if ev.Type == mvccpb.PUT {
						hdr, kv = &wr.Header, ev.Kv
						// may have multiple revs; hdr.rev = the last rev
						// set to kv's rev in case batch has multiple PUTs
						hdr.Revision = kv.ModRevision
						break
					}
				}
			}
			cancel()
		} else {
			hdr, kv = resp.Header, resp.Kvs[0]
		}

		select {
		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
		case <-ctx.Done():
			return
		}

		cctx, cancel := context.WithCancel(ctx)
		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
		keyDeleted := false
		for !keyDeleted {
			wr, ok := <-wch
			if !ok {
				return
			}
			for _, ev := range wr.Events {
				if ev.Type == mvccpb.DELETE {
					keyDeleted = true
					break
				}
				resp.Header = &wr.Header
				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
				select {
				case ch <- *resp:
				case <-cctx.Done():
					return
				}
			}
		}
		cancel()
	}
}

// Key returns the leader key if elected, empty string otherwise.
func (e *Election) Key() string { return e.leaderKey }
|
||||
|
||||
// Rev returns the leader key's creation revision, if elected.
|
||||
func (e *Election) Rev() int64 { return e.leaderRev }
|
||||
|
||||
// Header is the response header from the last successful election proposal.
|
||||
func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
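For orientation, here is a minimal usage sketch of the election API above, assuming an etcd cluster reachable at localhost:2379 and the canonical (non-vendored) import path; the election prefix and candidate value are illustrative, not part of this package.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// the session lease backs the candidate key; if this process dies,
	// the key expires and leadership can move on
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election")
	// Campaign blocks until elected, an error occurs, or ctx is cancelled
	if err := e.Campaign(context.TODO(), "candidate-1"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("elected; leader key:", e.Key())

	// followers can track leadership changes via e.Observe(ctx)
	if err := e.Resign(context.TODO()); err != nil {
		log.Fatal(err)
	}
}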
65 vendor/github.com/coreos/etcd/clientv3.old/concurrency/key.go generated vendored Normal file
@ -0,0 +1,65 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"fmt"

	v3 "github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"golang.org/x/net/context"
)

func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wr v3.WatchResponse
	wch := client.Watch(cctx, key, v3.WithRev(rev))
	for wr = range wch {
		for _, ev := range wr.Events {
			if ev.Type == mvccpb.DELETE {
				return nil
			}
		}
	}
	if err := wr.Err(); err != nil {
		return err
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return fmt.Errorf("lost watcher waiting for delete")
}

// waitDeletes efficiently waits until all keys matching the prefix and no
// greater than the create revision are deleted.
func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
	for {
		resp, err := client.Get(ctx, pfx, getOpts...)
		if err != nil {
			return nil, err
		}
		if len(resp.Kvs) == 0 {
			return resp.Header, nil
		}
		lastKey := string(resp.Kvs[0].Key)
		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
			return nil, err
		}
	}
}
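waitDelete is the building block for fair queuing: each waiter watches only its immediate predecessor key instead of the whole prefix. A sketch of that same wait-for-delete pattern against the public client API (function and package names are illustrative, not part of this package):

package lockdemo

import (
	"errors"

	v3 "github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"golang.org/x/net/context"
)

// waitForDelete blocks until key is deleted, watching from revision rev.
func waitForDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
	wch := client.Watch(ctx, key, v3.WithRev(rev))
	for wr := range wch {
		if err := wr.Err(); err != nil {
			return err
		}
		for _, ev := range wr.Events {
			if ev.Type == mvccpb.DELETE {
				return nil
			}
		}
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return errors.New("lost watcher waiting for delete")
}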
118 vendor/github.com/coreos/etcd/clientv3.old/concurrency/mutex.go generated vendored Normal file
@ -0,0 +1,118 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"fmt"
	"sync"

	v3 "github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
)

// Mutex implements the sync Locker interface with etcd
type Mutex struct {
	s *Session

	pfx   string
	myKey string
	myRev int64
	hdr   *pb.ResponseHeader
}

func NewMutex(s *Session, pfx string) *Mutex {
	return &Mutex{s, pfx + "/", "", -1, nil}
}

// Lock locks the mutex with a cancelable context. If the context is canceled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s := m.s
	client := m.s.Client()

	m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
	// put self in lock waiters via myKey; oldest waiter holds lock
	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
	// reuse key in case this session already holds the lock
	get := v3.OpGet(m.myKey)
	// fetch current holder to complete uncontended path with only one RPC
	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
	if err != nil {
		return err
	}
	m.myRev = resp.Header.Revision
	if !resp.Succeeded {
		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
	}
	// if there is no key on the prefix, or the minimum rev is this key, the lock is already held
	ownerKey := resp.Responses[1].GetResponseRange().Kvs
	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
		m.hdr = resp.Header
		return nil
	}

	// wait for deletion revisions prior to myKey
	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock(client.Ctx())
	default:
		m.hdr = hdr
	}
	return werr
}

func (m *Mutex) Unlock(ctx context.Context) error {
	client := m.s.Client()
	if _, err := client.Delete(ctx, m.myKey); err != nil {
		return err
	}
	m.myKey = "\x00"
	m.myRev = -1
	return nil
}

func (m *Mutex) IsOwner() v3.Cmp {
	return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
}

func (m *Mutex) Key() string { return m.myKey }

// Header is the response header received from etcd on acquiring the lock.
func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }

type lockerMutex struct{ *Mutex }

func (lm *lockerMutex) Lock() {
	client := lm.s.Client()
	if err := lm.Mutex.Lock(client.Ctx()); err != nil {
		panic(err)
	}
}
func (lm *lockerMutex) Unlock() {
	client := lm.s.Client()
	if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
		panic(err)
	}
}

// NewLocker creates a sync.Locker backed by an etcd mutex.
func NewLocker(s *Session, pfx string) sync.Locker {
	return &lockerMutex{NewMutex(s, pfx)}
}
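A minimal usage sketch of the mutex above, under the same assumptions as the election sketch (etcd at localhost:2379, canonical import path, illustrative lock prefix):

package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock")
	if err := m.Lock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	// critical section; guard writes with m.IsOwner() in a txn to avoid
	// acting on a lock that was lost, e.g.
	//   cli.Txn(ctx).If(m.IsOwner()).Then(...).Commit()
	if err := m.Unlock(context.TODO()); err != nil {
		log.Fatal(err)
	}
}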
140 vendor/github.com/coreos/etcd/clientv3.old/concurrency/session.go generated vendored Normal file
@ -0,0 +1,140 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"time"

	v3 "github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

const defaultSessionTTL = 60

// Session represents a lease kept alive for the lifetime of a client.
// Fault-tolerant applications may use sessions to reason about liveness.
type Session struct {
	client *v3.Client
	opts   *sessionOptions
	id     v3.LeaseID

	cancel context.CancelFunc
	donec  <-chan struct{}
}

// NewSession gets the leased session for a client.
func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
	ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
	for _, opt := range opts {
		opt(ops)
	}

	id := ops.leaseID
	if id == v3.NoLease {
		resp, err := client.Grant(ops.ctx, int64(ops.ttl))
		if err != nil {
			return nil, err
		}
		id = v3.LeaseID(resp.ID)
	}

	ctx, cancel := context.WithCancel(ops.ctx)
	keepAlive, err := client.KeepAlive(ctx, id)
	if err != nil || keepAlive == nil {
		return nil, err
	}

	donec := make(chan struct{})
	s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}

	// keep the lease alive until client error or cancelled context
	go func() {
		defer close(donec)
		for range keepAlive {
			// eat messages until keep alive channel closes
		}
	}()

	return s, nil
}

// Client is the etcd client that is attached to the session.
func (s *Session) Client() *v3.Client {
	return s.client
}

// Lease is the lease ID for keys bound to the session.
func (s *Session) Lease() v3.LeaseID { return s.id }

// Done returns a channel that closes when the lease is orphaned, expires, or
// is otherwise no longer being refreshed.
func (s *Session) Done() <-chan struct{} { return s.donec }

// Orphan ends the refresh for the session lease. This is useful
// in case the state of the client connection is indeterminate (revoke
// would fail) or when transferring lease ownership.
func (s *Session) Orphan() {
	s.cancel()
	<-s.donec
}

// Close orphans the session and revokes the session lease.
func (s *Session) Close() error {
	s.Orphan()
	// if revoke takes longer than the ttl, lease is expired anyway
	ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
	_, err := s.client.Revoke(ctx, s.id)
	cancel()
	return err
}

type sessionOptions struct {
	ttl     int
	leaseID v3.LeaseID
	ctx     context.Context
}

// SessionOption configures Session.
type SessionOption func(*sessionOptions)

// WithTTL configures the session's TTL in seconds.
// If TTL is <= 0, the default 60-second TTL will be used.
func WithTTL(ttl int) SessionOption {
	return func(so *sessionOptions) {
		if ttl > 0 {
			so.ttl = ttl
		}
	}
}

// WithLease specifies the existing leaseID to be used for the session.
// This is useful in a process restart scenario, for example, to reclaim
// leadership from an election prior to restart.
func WithLease(leaseID v3.LeaseID) SessionOption {
	return func(so *sessionOptions) {
		so.leaseID = leaseID
	}
}

// WithContext assigns a context to the session instead of defaulting to
// using the client context. This is useful for canceling NewSession and
// Close operations immediately without having to close the client. If the
// context is canceled before Close() completes, the session's lease will be
// abandoned and left to expire instead of being revoked.
func WithContext(ctx context.Context) SessionOption {
	return func(so *sessionOptions) {
		so.ctx = ctx
	}
}
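A short fragment showing the session lifecycle, assuming a *clientv3.Client named cli as in the sketches above; the 10-second TTL is illustrative:

// a 10-second TTL lease kept alive until Close revokes it
s, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
if err != nil {
	log.Fatal(err)
}
go func() {
	<-s.Done() // closes if the lease expires or keep-alives stop
	log.Println("session lost; step down from any held locks or leadership")
}()
defer s.Close() // stops the keep-alive and revokes the lease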
386 vendor/github.com/coreos/etcd/clientv3.old/concurrency/stm.go generated vendored Normal file
@ -0,0 +1,386 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package concurrency

import (
	"math"

	v3 "github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// STM is an interface for software transactional memory.
type STM interface {
	// Get returns the value for a key and inserts the key in the txn's read set.
	// If Get fails, it aborts the transaction with an error, never returning.
	Get(key ...string) string
	// Put adds a value for a key to the write set.
	Put(key, val string, opts ...v3.OpOption)
	// Rev returns the revision of a key in the read set.
	Rev(key string) int64
	// Del deletes a key.
	Del(key string)

	// commit attempts to apply the txn's changes to the server.
	commit() *v3.TxnResponse
	reset()
}

// Isolation is an enumeration of transactional isolation levels which
// describes how transactions should interfere and conflict.
type Isolation int

const (
	// SerializableSnapshot provides serializable isolation and also checks
	// for write conflicts.
	SerializableSnapshot Isolation = iota
	// Serializable reads within the same transaction attempt return data
	// from the revision of the first read.
	Serializable
	// RepeatableReads reads within the same transaction attempt always
	// return the same data.
	RepeatableReads
	// ReadCommitted reads keys from any committed revision.
	ReadCommitted
)

// stmError safely passes STM errors through panic to the STM error channel.
type stmError struct{ err error }

type stmOptions struct {
	iso      Isolation
	ctx      context.Context
	prefetch []string
}

type stmOption func(*stmOptions)

// WithIsolation specifies the transaction isolation level.
func WithIsolation(lvl Isolation) stmOption {
	return func(so *stmOptions) { so.iso = lvl }
}

// WithAbortContext specifies the context for permanently aborting the transaction.
func WithAbortContext(ctx context.Context) stmOption {
	return func(so *stmOptions) { so.ctx = ctx }
}

// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
// If an STM transaction will unconditionally fetch a set of keys, prefetching
// those keys will save the round-trip cost from requesting each key one by one
// with Get().
func WithPrefetch(keys ...string) stmOption {
	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
}

// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
	opts := &stmOptions{ctx: c.Ctx()}
	for _, f := range so {
		f(opts)
	}
	if len(opts.prefetch) != 0 {
		f := apply
		apply = func(s STM) error {
			s.Get(opts.prefetch...)
			return f(s)
		}
	}
	return runSTM(mkSTM(c, opts), apply)
}

func mkSTM(c *v3.Client, opts *stmOptions) STM {
	switch opts.iso {
	case SerializableSnapshot:
		s := &stmSerializable{
			stm:      stm{client: c, ctx: opts.ctx},
			prefetch: make(map[string]*v3.GetResponse),
		}
		s.conflicts = func() []v3.Cmp {
			return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
		}
		return s
	case Serializable:
		s := &stmSerializable{
			stm:      stm{client: c, ctx: opts.ctx},
			prefetch: make(map[string]*v3.GetResponse),
		}
		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
		return s
	case RepeatableReads:
		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
		return s
	case ReadCommitted:
		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
		s.conflicts = func() []v3.Cmp { return nil }
		return s
	default:
		panic("unsupported stm")
	}
}

type stmResponse struct {
	resp *v3.TxnResponse
	err  error
}

func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
	outc := make(chan stmResponse, 1)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				e, ok := r.(stmError)
				if !ok {
					// client apply panicked
					panic(r)
				}
				outc <- stmResponse{nil, e.err}
			}
		}()
		var out stmResponse
		for {
			s.reset()
			if out.err = apply(s); out.err != nil {
				break
			}
			if out.resp = s.commit(); out.resp != nil {
				break
			}
		}
		outc <- out
	}()
	r := <-outc
	return r.resp, r.err
}

// stm implements repeatable-read software transactional memory over etcd
type stm struct {
	client *v3.Client
	ctx    context.Context
	// rset holds read key values and revisions
	rset readSet
	// wset holds overwritten keys and their values
	wset writeSet
	// getOpts are the opts used for gets
	getOpts []v3.OpOption
	// conflicts computes the current conflicts on the txn
	conflicts func() []v3.Cmp
}

type stmPut struct {
	val string
	op  v3.Op
}

type readSet map[string]*v3.GetResponse

func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
	for i, resp := range txnresp.Responses {
		rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
	}
}

func (rs readSet) first() int64 {
	ret := int64(math.MaxInt64 - 1)
	for _, resp := range rs {
		if len(resp.Kvs) > 0 && resp.Kvs[0].ModRevision < ret {
			ret = resp.Kvs[0].ModRevision
		}
	}
	return ret
}

// cmps guards the txn from updates to read set
func (rs readSet) cmps() []v3.Cmp {
	cmps := make([]v3.Cmp, 0, len(rs))
	for k, rk := range rs {
		cmps = append(cmps, isKeyCurrent(k, rk))
	}
	return cmps
}

type writeSet map[string]stmPut

func (ws writeSet) get(keys ...string) *stmPut {
	for _, key := range keys {
		if wv, ok := ws[key]; ok {
			return &wv
		}
	}
	return nil
}

// cmps returns a cmp list testing no writes have happened past rev
func (ws writeSet) cmps(rev int64) []v3.Cmp {
	cmps := make([]v3.Cmp, 0, len(ws))
	for key := range ws {
		cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
	}
	return cmps
}

// puts is the list of ops for all pending writes
func (ws writeSet) puts() []v3.Op {
	puts := make([]v3.Op, 0, len(ws))
	for _, v := range ws {
		puts = append(puts, v.op)
	}
	return puts
}

func (s *stm) Get(keys ...string) string {
	if wv := s.wset.get(keys...); wv != nil {
		return wv.val
	}
	return respToValue(s.fetch(keys...))
}

func (s *stm) Put(key, val string, opts ...v3.OpOption) {
	s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
}

func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }

func (s *stm) Rev(key string) int64 {
	if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
		return resp.Kvs[0].ModRevision
	}
	return 0
}

func (s *stm) commit() *v3.TxnResponse {
	txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
	if err != nil {
		panic(stmError{err})
	}
	if txnresp.Succeeded {
		return txnresp
	}
	return nil
}

func (s *stm) fetch(keys ...string) *v3.GetResponse {
	if len(keys) == 0 {
		return nil
	}
	ops := make([]v3.Op, len(keys))
	for i, key := range keys {
		if resp, ok := s.rset[key]; ok {
			return resp
		}
		ops[i] = v3.OpGet(key, s.getOpts...)
	}
	txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
	if err != nil {
		panic(stmError{err})
	}
	s.rset.add(keys, txnresp)
	return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
}

func (s *stm) reset() {
	s.rset = make(map[string]*v3.GetResponse)
	s.wset = make(map[string]stmPut)
}

type stmSerializable struct {
	stm
	prefetch map[string]*v3.GetResponse
}

func (s *stmSerializable) Get(keys ...string) string {
	if wv := s.wset.get(keys...); wv != nil {
		return wv.val
	}
	firstRead := len(s.rset) == 0
	for _, key := range keys {
		if resp, ok := s.prefetch[key]; ok {
			delete(s.prefetch, key)
			s.rset[key] = resp
		}
	}
	resp := s.stm.fetch(keys...)
	if firstRead {
		// txn's base revision is defined by the first read
		s.getOpts = []v3.OpOption{
			v3.WithRev(resp.Header.Revision),
			v3.WithSerializable(),
		}
	}
	return respToValue(resp)
}

func (s *stmSerializable) Rev(key string) int64 {
	s.Get(key)
	return s.stm.Rev(key)
}

func (s *stmSerializable) gets() ([]string, []v3.Op) {
	keys := make([]string, 0, len(s.rset))
	ops := make([]v3.Op, 0, len(s.rset))
	for k := range s.rset {
		keys = append(keys, k)
		ops = append(ops, v3.OpGet(k))
	}
	return keys, ops
}

func (s *stmSerializable) commit() *v3.TxnResponse {
	keys, getops := s.gets()
	txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
	// use Else to prefetch keys in case of conflict to save a round trip
	txnresp, err := txn.Else(getops...).Commit()
	if err != nil {
		panic(stmError{err})
	}
	if txnresp.Succeeded {
		return txnresp
	}
	// load prefetch with Else data
	s.rset.add(keys, txnresp)
	s.prefetch = s.rset
	s.getOpts = nil
	return nil
}

func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
	if len(r.Kvs) != 0 {
		return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
	}
	return v3.Compare(v3.ModRevision(k), "=", 0)
}

func respToValue(resp *v3.GetResponse) string {
	if resp == nil || len(resp.Kvs) == 0 {
		return ""
	}
	return string(resp.Kvs[0].Value)
}

// NewSTMRepeatable is deprecated.
func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
}

// NewSTMSerializable is deprecated.
func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
}

// NewSTMReadCommitted is deprecated.
func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
}
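A minimal STM fragment, again assuming a *clientv3.Client named cli and an import of the standard strconv package; the account keys and amount are illustrative. NewSTM retries the apply function until the commit transaction succeeds without read/write conflicts:

// atomically move 10 units between two keys
xfer := func(stm concurrency.STM) error {
	a, _ := strconv.Atoi(stm.Get("accounts/a")) // parse errors ignored in this sketch
	b, _ := strconv.Atoi(stm.Get("accounts/b"))
	stm.Put("accounts/a", strconv.Itoa(a-10))
	stm.Put("accounts/b", strconv.Itoa(b+10))
	return nil // apply may run several times until the commit txn succeeds
}
if _, err := concurrency.NewSTM(cli, xfer,
	concurrency.WithIsolation(concurrency.SerializableSnapshot)); err != nil {
	log.Fatal(err)
}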
62 vendor/github.com/coreos/etcd/clientv3.old/config.go generated vendored Normal file
@ -0,0 +1,62 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"crypto/tls"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

type Config struct {
	// Endpoints is a list of URLs.
	Endpoints []string `json:"endpoints"`

	// AutoSyncInterval is the interval to update endpoints with its latest members.
	// 0 disables auto-sync. By default auto-sync is disabled.
	AutoSyncInterval time.Duration `json:"auto-sync-interval"`

	// DialTimeout is the timeout for failing to establish a connection.
	DialTimeout time.Duration `json:"dial-timeout"`

	// DialKeepAliveTime is the time in seconds after which client pings the server to see if
	// transport is alive.
	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`

	// DialKeepAliveTimeout is the time in seconds that the client waits for a response for the
	// keep-alive probe. If the response is not received in this time, the connection is closed.
	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`

	// TLS holds the client secure credentials, if any.
	TLS *tls.Config

	// Username is a username for authentication.
	Username string `json:"username"`

	// Password is a password for authentication.
	Password string `json:"password"`

	// RejectOldCluster when set will refuse to create a client against an outdated cluster.
	RejectOldCluster bool `json:"reject-old-cluster"`

	// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
	DialOptions []grpc.DialOption

	// Context is the default client context; it can be used to cancel grpc dial out and
	// other operations that do not have an explicit context.
	Context context.Context
}
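An illustrative Config fragment exercising the keep-alive and auto-sync fields above (the "time" import is assumed; the values are examples, not recommendations):

cfg := clientv3.Config{
	Endpoints:            []string{"localhost:2379"},
	DialTimeout:          5 * time.Second,
	DialKeepAliveTime:    30 * time.Second, // probe the connection every 30s
	DialKeepAliveTimeout: 10 * time.Second, // close it if a probe gets no reply within 10s
	AutoSyncInterval:     time.Minute,      // periodically refresh the endpoint list from the cluster
}
cli, err := clientv3.New(cfg)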
64 vendor/github.com/coreos/etcd/clientv3.old/doc.go generated vendored Normal file
@ -0,0 +1,64 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package clientv3 implements the official Go etcd client for v3.
//
// Create a client using `clientv3.New`:
//
//	cli, err := clientv3.New(clientv3.Config{
//		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
//		DialTimeout: 5 * time.Second,
//	})
//	if err != nil {
//		// handle error!
//	}
//	defer cli.Close()
//
// Make sure to close the client after using it. If the client is not closed,
// its connection will leak goroutines.
//
// To specify a client request timeout, pass context.WithTimeout to APIs:
//
//	ctx, cancel := context.WithTimeout(context.Background(), timeout)
//	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
//	cancel()
//	if err != nil {
//		// handle error!
//	}
//	// use the response
//
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines.
//
// The etcd client returns 2 types of errors:
//
//	1. context error: canceled or deadline exceeded.
//	2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//
//	resp, err := kvc.Put(ctx, "", "")
//	if err != nil {
//		if err == context.Canceled {
//			// ctx is canceled by another routine
//		} else if err == context.DeadlineExceeded {
//			// ctx is attached with a deadline and it exceeded
//		} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
//			// process (verr.Errors)
//		} else {
//			// bad cluster endpoints, which are not etcd servers
//		}
//	}
//
package clientv3
103 vendor/github.com/coreos/etcd/clientv3.old/example_cluster_test.go generated vendored Normal file
@ -0,0 +1,103 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3_test

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func ExampleCluster_memberList() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.MemberList(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("members:", len(resp.Members))
	// Output: members: 3
}

func ExampleCluster_memberAdd() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints[:2],
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	peerURLs := endpoints[2:]
	mresp, err := cli.MemberAdd(context.Background(), peerURLs)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("added member.PeerURLs:", mresp.Member.PeerURLs)
	// added member.PeerURLs: [http://localhost:32380]
}

func ExampleCluster_memberRemove() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints[1:],
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.MemberList(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.MemberRemove(context.Background(), resp.Members[0].ID)
	if err != nil {
		log.Fatal(err)
	}
}

func ExampleCluster_memberUpdate() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.MemberList(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	peerURLs := []string{"http://localhost:12380"}
	_, err = cli.MemberUpdate(context.Background(), resp.Members[0].ID, peerURLs)
	if err != nil {
		log.Fatal(err)
	}
}
279 vendor/github.com/coreos/etcd/clientv3.old/example_kv_test.go generated vendored Normal file
@ -0,0 +1,279 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3_test

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"golang.org/x/net/context"
)

func ExampleKV_put() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	_, err = cli.Put(ctx, "sample_key", "sample_value")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}

func ExampleKV_putErrorHandling() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	_, err = cli.Put(ctx, "", "sample_value")
	cancel()
	if err != nil {
		switch err {
		case context.Canceled:
			fmt.Printf("ctx is canceled by another routine: %v\n", err)
		case context.DeadlineExceeded:
			fmt.Printf("ctx is attached with a deadline and it exceeded: %v\n", err)
		case rpctypes.ErrEmptyKey:
			fmt.Printf("client-side error: %v\n", err)
		default:
			fmt.Printf("bad cluster endpoints, which are not etcd servers: %v\n", err)
		}
	}
	// Output: client-side error: etcdserver: key is not provided
}

func ExampleKV_get() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	_, err = cli.Put(context.TODO(), "foo", "bar")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output: foo : bar
}

func ExampleKV_getWithRev() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	presp, err := cli.Put(context.TODO(), "foo", "bar1")
	if err != nil {
		log.Fatal(err)
	}
	_, err = cli.Put(context.TODO(), "foo", "bar2")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "foo", clientv3.WithRev(presp.Header.Revision))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output: foo : bar1
}

func ExampleKV_getSortedPrefix() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	for i := range make([]int, 3) {
		ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
		_, err = cli.Put(ctx, fmt.Sprintf("key_%d", i), "value")
		cancel()
		if err != nil {
			log.Fatal(err)
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "key", clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortDescend))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output:
	// key_2 : value
	// key_1 : value
	// key_0 : value
}

func ExampleKV_delete() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	defer cancel()

	// count keys about to be deleted
	gresp, err := cli.Get(ctx, "key", clientv3.WithPrefix())
	if err != nil {
		log.Fatal(err)
	}

	// delete the keys
	dresp, err := cli.Delete(ctx, "key", clientv3.WithPrefix())
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("Deleted all keys:", int64(len(gresp.Kvs)) == dresp.Deleted)
	// Output:
	// Deleted all keys: true
}

func ExampleKV_compact() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	compRev := resp.Header.Revision // specify compact revision of your choice

	ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
	_, err = cli.Compact(ctx, compRev)
	cancel()
	if err != nil {
		log.Fatal(err)
	}
}

func ExampleKV_txn() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	kvc := clientv3.NewKV(cli)

	_, err = kvc.Put(context.TODO(), "key", "xyz")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	_, err = kvc.Txn(ctx).
		If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). // txn value comparisons are lexical
		Then(clientv3.OpPut("key", "XYZ")).                      // this runs, since 'xyz' > 'abc'
		Else(clientv3.OpPut("key", "ABC")).
		Commit()
	cancel()
	if err != nil {
		log.Fatal(err)
	}

	gresp, err := kvc.Get(context.TODO(), "key")
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range gresp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output: key : XYZ
}

func ExampleKV_do() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ops := []clientv3.Op{
		clientv3.OpPut("put-key", "123"),
		clientv3.OpGet("put-key"),
		clientv3.OpPut("put-key", "456")}

	for _, op := range ops {
		if _, err := cli.Do(context.TODO(), op); err != nil {
			log.Fatal(err)
		}
	}
}
141 vendor/github.com/coreos/etcd/clientv3.old/example_lease_test.go generated vendored Normal file
@ -0,0 +1,141 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3_test

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func ExampleLease_grant() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// minimum lease TTL is 5 seconds
	resp, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	// after 5 seconds, the key 'foo' will be removed
	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
	if err != nil {
		log.Fatal(err)
	}
}

func ExampleLease_revoke() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
	if err != nil {
		log.Fatal(err)
	}

	// revoking lease expires the key attached to its lease ID
	_, err = cli.Revoke(context.TODO(), resp.ID)
	if err != nil {
		log.Fatal(err)
	}

	gresp, err := cli.Get(context.TODO(), "foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("number of keys:", len(gresp.Kvs))
	// Output: number of keys: 0
}

func ExampleLease_keepAlive() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
	if err != nil {
		log.Fatal(err)
	}

	// the key 'foo' will be kept forever
	ch, kaerr := cli.KeepAlive(context.TODO(), resp.ID)
	if kaerr != nil {
		log.Fatal(kaerr)
	}

	ka := <-ch
	fmt.Println("ttl:", ka.TTL)
	// Output: ttl: 5
}

func ExampleLease_keepAliveOnce() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.Grant(context.TODO(), 5)
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
	if err != nil {
		log.Fatal(err)
	}

	// to renew the lease only once
	ka, kaerr := cli.KeepAliveOnce(context.TODO(), resp.ID)
	if kaerr != nil {
		log.Fatal(kaerr)
	}

	fmt.Println("ttl:", ka.TTL)
	// Output: ttl: 5
}
68 vendor/github.com/coreos/etcd/clientv3.old/example_maintenence_test.go generated vendored Normal file
@ -0,0 +1,68 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3_test

import (
	"fmt"
	"log"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/clientv3"
)

func ExampleMaintenance_status() {
	for _, ep := range endpoints {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:   []string{ep},
			DialTimeout: dialTimeout,
		})
		if err != nil {
			log.Fatal(err)
		}
		defer cli.Close()

		// resp, err := cli.Status(context.Background(), ep)
		//
		// or
		//
		mapi := clientv3.NewMaintenance(cli)
		resp, err := mapi.Status(context.Background(), ep)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("endpoint: %s / IsLeader: %v\n", ep, resp.Header.MemberId == resp.Leader)
	}
	// endpoint: localhost:2379 / IsLeader: false
	// endpoint: localhost:22379 / IsLeader: false
	// endpoint: localhost:32379 / IsLeader: true
}

func ExampleMaintenance_defragment() {
	for _, ep := range endpoints {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:   []string{ep},
			DialTimeout: dialTimeout,
		})
		if err != nil {
			log.Fatal(err)
		}
		defer cli.Close()

		if _, err = cli.Defragment(context.TODO(), ep); err != nil {
			log.Fatal(err)
		}
	}
}
84 vendor/github.com/coreos/etcd/clientv3.old/example_metrics_test.go generated vendored Normal file
@ -0,0 +1,84 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3_test

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"strings"

	"github.com/coreos/etcd/clientv3"

	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func ExampleClient_metrics() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints: endpoints,
		DialOptions: []grpc.DialOption{
			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// get a key so it shows up in the metrics as a range rpc
	cli.Get(context.TODO(), "test_key")

	// listen for all prometheus metrics
	ln, err := net.Listen("tcp", ":0")
	if err != nil {
		log.Fatal(err)
	}
	donec := make(chan struct{})
	go func() {
		defer close(donec)
		http.Serve(ln, prometheus.Handler())
	}()
	defer func() {
		ln.Close()
		<-donec
	}()

	// make an http request to fetch all prometheus metrics
	url := "http://" + ln.Addr().String() + "/metrics"
	resp, err := http.Get(url)
	if err != nil {
		log.Fatalf("fetch error: %v", err)
	}
	b, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		log.Fatalf("fetch error: reading %s: %v", url, err)
	}

	// confirm range request in metrics
	for _, l := range strings.Split(string(b), "\n") {
		if strings.Contains(l, `grpc_client_started_total{grpc_method="Range"`) {
			fmt.Println(l)
			break
		}
	}
	// Output: grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
}
76 vendor/github.com/coreos/etcd/clientv3.old/example_test.go generated vendored Normal file
@ -0,0 +1,76 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
dialTimeout = 5 * time.Second
|
||||
requestTimeout = 10 * time.Second
|
||||
endpoints = []string{"localhost:2379", "localhost:22379", "localhost:32379"}
|
||||
)
|
||||
|
||||
func Example() {
|
||||
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
|
||||
clientv3.SetLogger(plog)
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close() // make sure to close the client
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleConfig_withTLS() {
|
||||
tlsInfo := transport.TLSInfo{
|
||||
CertFile: "/tmp/test-certs/test-name-1.pem",
|
||||
KeyFile: "/tmp/test-certs/test-name-1-key.pem",
|
||||
TrustedCAFile: "/tmp/test-certs/trusted-ca.pem",
|
||||
}
|
||||
tlsConfig, err := tlsInfo.ClientConfig()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
DialTimeout: dialTimeout,
|
||||
TLS: tlsConfig,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer cli.Close() // make sure to close the client
|
||||
|
||||
_, err = cli.Put(context.TODO(), "foo", "bar")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
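The requestTimeout variable above is declared for the examples in this package; a minimal editorial sketch (not from the vendored file; the example name is hypothetical) of a Get bounded by it, reusing the endpoints and dialTimeout variables:

package clientv3_test

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func ExampleKV_getWithTimeout() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// bound the request so an unreachable cluster cannot block forever
	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "foo")
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s : %s\n", kv.Key, kv.Value)
	}
}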
100 vendor/github.com/coreos/etcd/clientv3.old/example_watch_test.go generated vendored Normal file
@ -0,0 +1,100 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3_test

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func ExampleWatcher_watch() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	rch := cli.Watch(context.Background(), "foo")
	for wresp := range rch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	// PUT "foo" : "bar"
}

func ExampleWatcher_watchWithPrefix() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	rch := cli.Watch(context.Background(), "foo", clientv3.WithPrefix())
	for wresp := range rch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	// PUT "foo1" : "bar"
}

func ExampleWatcher_watchWithRange() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// watches within ['foo1', 'foo4'), in lexicographical order
	rch := cli.Watch(context.Background(), "foo1", clientv3.WithRange("foo4"))
	for wresp := range rch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	// PUT "foo1" : "bar"
	// PUT "foo2" : "bar"
	// PUT "foo3" : "bar"
}

func ExampleWatcher_watchWithProgressNotify() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}

	rch := cli.Watch(context.Background(), "foo", clientv3.WithProgressNotify())
	wresp := <-rch
	fmt.Printf("wresp.Header.Revision: %d\n", wresp.Header.Revision)
	fmt.Println("wresp.IsProgressNotify:", wresp.IsProgressNotify())
	// wresp.Header.Revision: 0
	// wresp.IsProgressNotify: true
}
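The watch loops above run until the server closes the channel; a minimal editorial sketch (not from the vendored file; the example name is hypothetical) of stopping a watcher from the client side by canceling its context, which closes the channel and ends the range loop:

package clientv3_test

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func ExampleWatcher_watchWithCancel() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(time.Second)
		cancel() // stop watching after one second
	}()

	for wresp := range cli.Watch(ctx, "foo", clientv3.WithPrefix()) {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
	// the range loop exits once cancel() is called
}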
128 vendor/github.com/coreos/etcd/clientv3.old/integration/cluster_test.go generated vendored Normal file
@ -0,0 +1,128 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"reflect"
	"testing"

	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"github.com/coreos/etcd/pkg/types"
	"golang.org/x/net/context"
)

func TestMemberList(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clus.RandClient()

	resp, err := capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	if len(resp.Members) != 3 {
		t.Errorf("number of members = %d, want %d", len(resp.Members), 3)
	}
}

func TestMemberAdd(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clus.RandClient()

	urls := []string{"http://127.0.0.1:1234"}
	resp, err := capi.MemberAdd(context.Background(), urls)
	if err != nil {
		t.Fatalf("failed to add member %v", err)
	}

	if !reflect.DeepEqual(resp.Member.PeerURLs, urls) {
		t.Errorf("urls = %v, want %v", resp.Member.PeerURLs, urls)
	}
}

func TestMemberRemove(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clus.Client(1)
	resp, err := capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	rmvID := resp.Members[0].ID
	// indexes in capi member list don't necessarily match cluster member list;
	// find member that is not the client to remove
	for _, m := range resp.Members {
		mURLs, _ := types.NewURLs(m.PeerURLs)
		if !reflect.DeepEqual(mURLs, clus.Members[1].ServerConfig.PeerURLs) {
			rmvID = m.ID
			break
		}
	}

	_, err = capi.MemberRemove(context.Background(), rmvID)
	if err != nil {
		t.Fatalf("failed to remove member %v", err)
	}

	resp, err = capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	if len(resp.Members) != 2 {
		t.Errorf("number of members = %d, want %d", len(resp.Members), 2)
	}
}

func TestMemberUpdate(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	capi := clus.RandClient()
	resp, err := capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	urls := []string{"http://127.0.0.1:1234"}
	_, err = capi.MemberUpdate(context.Background(), resp.Members[0].ID, urls)
	if err != nil {
		t.Fatalf("failed to update member %v", err)
	}

	resp, err = capi.MemberList(context.Background())
	if err != nil {
		t.Fatalf("failed to list member %v", err)
	}

	if !reflect.DeepEqual(resp.Members[0].PeerURLs, urls) {
		t.Errorf("urls = %v, want %v", resp.Members[0].PeerURLs, urls)
	}
}
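A minimal editorial sketch (not from the vendored files) of driving the same Cluster API calls from an ordinary client outside the test harness; the client endpoint and peer URL are placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// list the current members, then add a new one by its peer URL
	resp, err := cli.MemberList(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Members {
		fmt.Println(m.ID, m.Name, m.PeerURLs)
	}
	if _, err := cli.MemberAdd(context.Background(), []string{"http://127.0.0.1:2380"}); err != nil {
		log.Fatal(err)
	}
}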
191 vendor/github.com/coreos/etcd/clientv3.old/integration/dial_test.go generated vendored Normal file
@ -0,0 +1,191 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"math/rand"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"github.com/coreos/etcd/pkg/transport"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

var (
	testTLSInfo = transport.TLSInfo{
		KeyFile:        "../../integration/fixtures/server.key.insecure",
		CertFile:       "../../integration/fixtures/server.crt",
		TrustedCAFile:  "../../integration/fixtures/ca.crt",
		ClientCertAuth: true,
	}

	testTLSInfoExpired = transport.TLSInfo{
		KeyFile:        "../../integration/fixtures-expired/server-key.pem",
		CertFile:       "../../integration/fixtures-expired/server.pem",
		TrustedCAFile:  "../../integration/fixtures-expired/etcd-root-ca.pem",
		ClientCertAuth: true,
	}
)

// TestDialTLSExpired tests that a client with expired certs fails to dial.
func TestDialTLSExpired(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo})
	defer clus.Terminate(t)

	tls, err := testTLSInfoExpired.ClientConfig()
	if err != nil {
		t.Fatal(err)
	}
	// expect remote errors 'tls: bad certificate'
	_, err = clientv3.New(clientv3.Config{
		Endpoints:   []string{clus.Members[0].GRPCAddr()},
		DialTimeout: 3 * time.Second,
		TLS:         tls,
	})
	if err != grpc.ErrClientConnTimeout {
		t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
	}
}

// TestDialTLSNoConfig ensures the client fails to dial / times out
// when TLS endpoints (https, unixs) are given but no TLS config is provided.
func TestDialTLSNoConfig(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo})
	defer clus.Terminate(t)
	// expect 'signed by unknown authority'
	_, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{clus.Members[0].GRPCAddr()},
		DialTimeout: time.Second,
	})
	if err != grpc.ErrClientConnTimeout {
		t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err)
	}
}

// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func TestDialSetEndpointsBeforeFail(t *testing.T) {
	testDialSetEndpoints(t, true)
}

func TestDialSetEndpointsAfterFail(t *testing.T) {
	testDialSetEndpoints(t, false)
}

// testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func testDialSetEndpoints(t *testing.T, setBefore bool) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get endpoint list
	eps := make([]string, 3)
	for i := range eps {
		eps[i] = clus.Members[i].GRPCAddr()
	}
	toKill := rand.Intn(len(eps))

	cfg := clientv3.Config{Endpoints: []string{eps[toKill]}, DialTimeout: 1 * time.Second}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	if setBefore {
		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
	}
	// make a dead node
	clus.Members[toKill].Stop(t)
	clus.WaitLeader(t)

	if !setBefore {
		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
		t.Fatal(err)
	}
	cancel()
}

// TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint
// with a new one that doesn't include the original endpoint.
func TestSwitchSetEndpoints(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get non-partitioned members' endpoints
	eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}

	cli := clus.Client(0)
	clus.Members[0].InjectPartition(t, clus.Members[1:])

	cli.SetEndpoints(eps...)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := cli.Get(ctx, "foo"); err != nil {
		t.Fatal(err)
	}
}

func TestRejectOldCluster(t *testing.T) {
	defer testutil.AfterTest(t)
	// 2 endpoints to test multi-endpoint Status
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	cfg := clientv3.Config{
		Endpoints:        []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()},
		DialTimeout:      5 * time.Second,
		RejectOldCluster: true,
	}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	cli.Close()
}

// TestDialForeignEndpoint checks that an endpoint that is not registered
// with the balancer can be dialed.
func TestDialForeignEndpoint(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	conn, err := clus.Client(0).Dial(clus.Client(1).Endpoints()[0])
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	// grpc can return a lazy connection that's not connected yet; confirm
	// that it can communicate with the cluster.
	kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn))
	ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
	defer cancel()
	if _, gerr := kvc.Get(ctx, "abc"); gerr != nil {
		t.Fatal(gerr)
	}
}
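SetEndpoints, exercised by the tests above, can also rotate a live client onto new endpoints outside of a test harness; a minimal editorial sketch (not from the vendored files) with placeholder addresses:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// swap the endpoint list at runtime, e.g. after learning that the
	// original endpoint is being decommissioned
	cli.SetEndpoints("localhost:22379", "localhost:32379")
}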
17 vendor/github.com/coreos/etcd/clientv3.old/integration/doc.go generated vendored Normal file
@ -0,0 +1,17 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package integration implements tests built upon embedded etcd, and focuses on
// correctness of etcd client.
package integration
897 vendor/github.com/coreos/etcd/clientv3.old/integration/kv_test.go generated vendored Normal file
@ -0,0 +1,897 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"bytes"
	"math/rand"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func TestKVPutError(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		maxReqBytes = 1.5 * 1024 * 1024 // hard coded max in v3_server.go
		quota       = int64(int(maxReqBytes) + 8*os.Getpagesize())
	)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	_, err := kv.Put(ctx, "", "bar")
	if err != rpctypes.ErrEmptyKey {
		t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
	}

	_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100)))
	if err != rpctypes.ErrRequestTooLarge {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
	}

	_, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
	if err != nil { // below quota
		t.Fatal(err)
	}

	time.Sleep(1 * time.Second) // give enough time for commit

	_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
	if err != rpctypes.ErrNoSpace { // over quota
		t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
	}
}

func TestKVPut(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clus.RandClient()

	kv := clus.RandClient()
	ctx := context.TODO()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Fatalf("failed to create lease %v", err)
	}

	tests := []struct {
		key, val string
		leaseID  clientv3.LeaseID
	}{
		{"foo", "bar", clientv3.NoLease},
		{"hello", "world", resp.ID},
	}

	for i, tt := range tests {
		if _, err := kv.Put(ctx, tt.key, tt.val, clientv3.WithLease(tt.leaseID)); err != nil {
			t.Fatalf("#%d: couldn't put %q (%v)", i, tt.key, err)
		}
		resp, err := kv.Get(ctx, tt.key)
		if err != nil {
			t.Fatalf("#%d: couldn't get key (%v)", i, err)
		}
		if len(resp.Kvs) != 1 {
			t.Fatalf("#%d: expected 1 key, got %d", i, len(resp.Kvs))
		}
		if !bytes.Equal([]byte(tt.val), resp.Kvs[0].Value) {
			t.Errorf("#%d: val = %s, want %s", i, resp.Kvs[0].Value, tt.val)
		}
		if tt.leaseID != clientv3.LeaseID(resp.Kvs[0].Lease) {
			t.Errorf("#%d: lease = %d, want %d", i, resp.Kvs[0].Lease, tt.leaseID)
		}
	}
}

// TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value.
func TestKVPutWithIgnoreValue(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clus.RandClient()

	_, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue())
	if err != rpctypes.ErrKeyNotFound {
		t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
	}

	if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	if _, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()); err != nil {
		t.Fatal(err)
	}
	rr, rerr := kv.Get(context.TODO(), "foo")
	if rerr != nil {
		t.Fatal(rerr)
	}
	if len(rr.Kvs) != 1 {
		t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs))
	}
	if !bytes.Equal(rr.Kvs[0].Value, []byte("bar")) {
		t.Fatalf("value expected 'bar', got %q", rr.Kvs[0].Value)
	}
}

// TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key.
func TestKVPutWithIgnoreLease(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clus.RandClient()

	lapi := clus.RandClient()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()); err != rpctypes.ErrKeyNotFound {
		t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
	}

	if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)); err != nil {
		t.Fatal(err)
	}

	if _, err := kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()); err != nil {
		t.Fatal(err)
	}

	rr, rerr := kv.Get(context.TODO(), "zoo")
	if rerr != nil {
		t.Fatal(rerr)
	}
	if len(rr.Kvs) != 1 {
		t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs))
	}
	if rr.Kvs[0].Lease != int64(resp.ID) {
		t.Fatalf("lease expected %v, got %v", resp.ID, rr.Kvs[0].Lease)
	}
}

func TestKVPutWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	// wait for election timeout, then member[0] will not have a leader.
	var (
		electionTicks = 10
		tickDuration  = 10 * time.Millisecond
	)
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	kv := clus.Client(0)
	_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
	if err != rpctypes.ErrNoLeader {
		t.Fatal(err)
	}

	// clients may give timeout errors since the members are stopped; take
	// the clients so that terminating the cluster won't complain
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)
}

func TestKVRange(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	keySet := []string{"a", "b", "c", "c", "c", "foo", "foo/abc", "fop"}
	for i, key := range keySet {
		if _, err := kv.Put(ctx, key, ""); err != nil {
			t.Fatalf("#%d: couldn't put %q (%v)", i, key, err)
		}
	}
	resp, err := kv.Get(ctx, keySet[0])
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	wheader := resp.Header

	tests := []struct {
		begin, end string
		rev        int64
		opts       []clientv3.OpOption

		wantSet []*mvccpb.KeyValue
	}{
		// range first two
		{
			"a", "c",
			0,
			nil,

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
			},
		},
		// range first two with serializable
		{
			"a", "c",
			0,
			[]clientv3.OpOption{clientv3.WithSerializable()},

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
			},
		},
		// range all with rev
		{
			"a", "x",
			2,
			nil,

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
			},
		},
		// range all with countOnly
		{
			"a", "x",
			2,
			[]clientv3.OpOption{clientv3.WithCountOnly()},

			nil,
		},
		// range all with SortByKey, SortAscend
		{
			"a", "x",
			0,
			[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
			},
		},
		// range all with SortByKey, missing sorting order (ASCEND by default)
		{
			"a", "x",
			0,
			[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByKey, clientv3.SortNone)},

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
			},
		},
		// range all with SortByCreateRevision, SortDescend
		{
			"a", "x",
			0,
			[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortDescend)},

			[]*mvccpb.KeyValue{
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
			},
		},
		// range all with SortByCreateRevision, missing sorting order (ASCEND by default)
		{
			"a", "x",
			0,
			[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortNone)},

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
			},
		},
		// range all with SortByModRevision, SortDescend
		{
			"a", "x",
			0,
			[]clientv3.OpOption{clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend)},

			[]*mvccpb.KeyValue{
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
			},
		},
		// WithPrefix
		{
			"foo", "",
			0,
			[]clientv3.OpOption{clientv3.WithPrefix()},

			[]*mvccpb.KeyValue{
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
			},
		},
		// WithFromKey
		{
			"fo", "",
			0,
			[]clientv3.OpOption{clientv3.WithFromKey()},

			[]*mvccpb.KeyValue{
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
			},
		},
		// fetch entire keyspace using WithFromKey
		{
			"\x00", "",
			0,
			[]clientv3.OpOption{clientv3.WithFromKey(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
			},
		},
		// fetch entire keyspace using WithPrefix
		{
			"", "",
			0,
			[]clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},

			[]*mvccpb.KeyValue{
				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
			},
		},
	}

	for i, tt := range tests {
		opts := []clientv3.OpOption{clientv3.WithRange(tt.end), clientv3.WithRev(tt.rev)}
		opts = append(opts, tt.opts...)
		resp, err := kv.Get(ctx, tt.begin, opts...)
		if err != nil {
			t.Fatalf("#%d: couldn't range (%v)", i, err)
		}
		if !reflect.DeepEqual(wheader, resp.Header) {
			t.Fatalf("#%d: wheader expected %+v, got %+v", i, wheader, resp.Header)
		}
		if !reflect.DeepEqual(tt.wantSet, resp.Kvs) {
			t.Fatalf("#%d: resp.Kvs expected %+v, got %+v", i, tt.wantSet, resp.Kvs)
		}
	}
}

func TestKVGetErrConnClosed(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	donec := make(chan struct{})
	go func() {
		defer close(donec)
		_, err := cli.Get(context.TODO(), "foo")
		if err != nil && err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
	}()

	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}
	clus.TakeClient(0)

	select {
	case <-time.After(3 * time.Second):
		t.Fatal("kv.Get took too long")
	case <-donec:
	}
}

func TestKVNewAfterClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.TakeClient(0)
	if err := cli.Close(); err != nil {
		t.Fatal(err)
	}

	donec := make(chan struct{})
	go func() {
		if _, err := cli.Get(context.TODO(), "foo"); err != grpc.ErrClientConnClosing {
			t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
		}
		close(donec)
	}()
	select {
	case <-time.After(3 * time.Second):
		t.Fatal("kv.Get took too long")
	case <-donec:
	}
}

func TestKVDeleteRange(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	tests := []struct {
		key  string
		opts []clientv3.OpOption

		wkeys []string
	}{
		// [a, c)
		{
			key:  "a",
			opts: []clientv3.OpOption{clientv3.WithRange("c")},

			wkeys: []string{"c", "c/abc", "d"},
		},
		// >= c
		{
			key:  "c",
			opts: []clientv3.OpOption{clientv3.WithFromKey()},

			wkeys: []string{"a", "b"},
		},
		// c*
		{
			key:  "c",
			opts: []clientv3.OpOption{clientv3.WithPrefix()},

			wkeys: []string{"a", "b", "d"},
		},
		// *
		{
			key:  "\x00",
			opts: []clientv3.OpOption{clientv3.WithFromKey()},

			wkeys: []string{},
		},
	}

	for i, tt := range tests {
		keySet := []string{"a", "b", "c", "c/abc", "d"}
		for j, key := range keySet {
			if _, err := kv.Put(ctx, key, ""); err != nil {
				t.Fatalf("#%d: couldn't put %q (%v)", j, key, err)
			}
		}

		_, err := kv.Delete(ctx, tt.key, tt.opts...)
		if err != nil {
			t.Fatalf("#%d: couldn't delete range (%v)", i, err)
		}

		resp, err := kv.Get(ctx, "a", clientv3.WithFromKey())
		if err != nil {
			t.Fatalf("#%d: couldn't get keys (%v)", i, err)
		}
		keys := []string{}
		for _, kv := range resp.Kvs {
			keys = append(keys, string(kv.Key))
		}
		if !reflect.DeepEqual(tt.wkeys, keys) {
			t.Errorf("#%d: resp.Kvs got %v, expected %v", i, keys, tt.wkeys)
		}
	}
}

func TestKVDelete(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	presp, err := kv.Put(ctx, "foo", "")
	if err != nil {
		t.Fatalf("couldn't put 'foo' (%v)", err)
	}
	if presp.Header.Revision != 2 {
		t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
	}
	resp, err := kv.Delete(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't delete key (%v)", err)
	}
	if resp.Header.Revision != 3 {
		t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
	}
	gresp, err := kv.Get(ctx, "foo")
	if err != nil {
		t.Fatalf("couldn't get key (%v)", err)
	}
	if len(gresp.Kvs) > 0 {
		t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
	}
}

func TestKVCompactError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	for i := 0; i < 5; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}
	_, err := kv.Compact(ctx, 6)
	if err != nil {
		t.Fatalf("couldn't compact 6 (%v)", err)
	}

	_, err = kv.Compact(ctx, 6)
	if err != rpctypes.ErrCompacted {
		t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
	}

	_, err = kv.Compact(ctx, 100)
	if err != rpctypes.ErrFutureRev {
		t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
	}
}

func TestKVCompact(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	for i := 0; i < 10; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}

	_, err := kv.Compact(ctx, 7)
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	_, err = kv.Compact(ctx, 7)
	if err == nil || err != rpctypes.ErrCompacted {
		t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted)
	}

	wcli := clus.RandClient()
	// new watcher could precede receiving the compaction without quorum first
	wcli.Get(ctx, "quorum-get")

	wchan := wcli.Watch(ctx, "foo", clientv3.WithRev(3))

	if wr := <-wchan; wr.CompactRevision != 7 {
		t.Fatalf("wchan CompactRevision got %v, want 7", wr.CompactRevision)
	}
	if wr, ok := <-wchan; ok {
		t.Fatalf("wchan got %v, expected closed", wr)
	}

	_, err = kv.Compact(ctx, 1000)
	if err == nil || err != rpctypes.ErrFutureRev {
		t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev)
	}
}

// TestKVGetRetry ensures get will retry on disconnect.
func TestKVGetRetry(t *testing.T) {
	defer testutil.AfterTest(t)

	clusterSize := 3
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
	defer clus.Terminate(t)

	// use a follower, because killing the leader and the following election
	// could leave no other endpoints for client reconnection
	fIdx := (clus.WaitLeader(t) + 1) % clusterSize

	kv := clus.Client(fIdx)
	ctx := context.TODO()

	if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	clus.Members[fIdx].Stop(t)

	donec := make(chan struct{})
	go func() {
		// Get will fail, but reconnect will trigger
		gresp, gerr := kv.Get(ctx, "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		wkvs := []*mvccpb.KeyValue{
			{
				Key:            []byte("foo"),
				Value:          []byte("bar"),
				CreateRevision: 2,
				ModRevision:    2,
				Version:        1,
			},
		}
		if !reflect.DeepEqual(gresp.Kvs, wkvs) {
			t.Fatalf("bad get: got %v, want %v", gresp.Kvs, wkvs)
		}
		donec <- struct{}{}
	}()

	time.Sleep(100 * time.Millisecond)
	clus.Members[fIdx].Restart(t)

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for get")
	case <-donec:
	}
}

// TestKVPutFailGetRetry ensures a get will retry following a failed put.
func TestKVPutFailGetRetry(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.Client(0)
	clus.Members[0].Stop(t)

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	defer cancel()
	_, err := kv.Put(ctx, "foo", "bar")
	if err == nil {
		t.Fatalf("got success on disconnected put, wanted error")
	}

	donec := make(chan struct{})
	go func() {
		// Get will fail, but reconnect will trigger
		gresp, gerr := kv.Get(context.TODO(), "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		if len(gresp.Kvs) != 0 {
			t.Fatalf("bad get kvs: got %+v, want empty", gresp.Kvs)
		}
		donec <- struct{}{}
	}()

	time.Sleep(100 * time.Millisecond)
	clus.Members[0].Restart(t)

	select {
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for get")
	case <-donec:
	}
}

// TestKVGetCancel tests that a context cancel on a Get terminates as expected.
func TestKVGetCancel(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	oldconn := clus.Client(0).ActiveConnection()
	kv := clus.Client(0)

	ctx, cancel := context.WithCancel(context.TODO())
	cancel()

	resp, err := kv.Get(ctx, "abc")
	if err == nil {
		t.Fatalf("cancel on get response %v, expected context error", resp)
	}
	newconn := clus.Client(0).ActiveConnection()
	if oldconn != newconn {
		t.Fatalf("cancel on get broke client connection")
	}
}

// TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
func TestKVGetStoppedServerAndClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.Members[0].Stop(t)
	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	// this Get fails and triggers an asynchronous connection retry
	_, err := cli.Get(ctx, "abc")
	cancel()
	if !strings.Contains(err.Error(), "context deadline") {
		t.Fatal(err)
	}
}

// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	clus.Members[0].Stop(t)

	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
	// get retries on all errors.
	// so here we use it to eat the potential broken pipe error for the next put.
	// grpc client might see a broken pipe error when we issue the get request before
	// grpc finds out the original connection is down due to the member shutdown.
	_, err := cli.Get(ctx, "abc")
	cancel()
	if !strings.Contains(err.Error(), "context deadline") {
		t.Fatal(err)
	}

	// this Put fails and triggers an asynchronous connection retry
	_, err = cli.Put(ctx, "abc", "123")
	cancel()
	if !strings.Contains(err.Error(), "context deadline") {
		t.Fatal(err)
	}
}

// TestKVPutOneEndpointDown ensures a client can connect and get if one endpoint is down
func TestKVPutOneEndpointDown(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get endpoint list
	eps := make([]string, 3)
	for i := range eps {
		eps[i] = clus.Members[i].GRPCAddr()
	}

	// make a dead node
	clus.Members[rand.Intn(len(eps))].Stop(t)

	// try to connect with dead node in the endpoint list
	cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()
	ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
	if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
		t.Fatal(err)
	}
	cancel()
}

// TestKVGetResetLoneEndpoint ensures that if an endpoint resets and all other
// endpoints are down, then it will reconnect.
func TestKVGetResetLoneEndpoint(t *testing.T) {
	defer testutil.AfterTest(t)
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	// get endpoint list
	eps := make([]string, 2)
	for i := range eps {
		eps[i] = clus.Members[i].GRPCAddr()
	}

	cfg := clientv3.Config{Endpoints: eps, DialTimeout: 500 * time.Millisecond}
	cli, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// disconnect everything
	clus.Members[0].Stop(t)
	clus.Members[1].Stop(t)

	// have Get try to reconnect
	donec := make(chan struct{})
	go func() {
		ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
		if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
			t.Fatal(err)
		}
		cancel()
		close(donec)
	}()
	time.Sleep(500 * time.Millisecond)
	clus.Members[0].Restart(t)
	select {
	case <-time.After(10 * time.Second):
		t.Fatalf("timed out waiting for Get")
	case <-donec:
	}
}
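The compaction tests above show watchers receiving CompactRevision once their start revision is compacted away; a minimal editorial sketch (not from the vendored files; endpoint and revisions are placeholders) of the same pattern outside a test:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// compact away history older than revision 100; watchers positioned
	// before that revision are notified via CompactRevision and closed
	if _, err := cli.Compact(context.Background(), 100); err != nil {
		log.Fatal(err)
	}
	wch := cli.Watch(context.Background(), "foo", clientv3.WithRev(1))
	wr := <-wch
	if wr.CompactRevision != 0 {
		log.Printf("watch compacted at revision %d; restart the watch from there", wr.CompactRevision)
	}
}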
761 vendor/github.com/coreos/etcd/clientv3.old/integration/lease_test.go generated vendored Normal file
@ -0,0 +1,761 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"reflect"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func TestLeaseNotFoundError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clus.RandClient()

	_, err := kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(clientv3.LeaseID(500)))
	if err != rpctypes.ErrLeaseNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
	}
}

func TestLeaseGrant(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clus.RandClient()

	kv := clus.RandClient()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
	if err != nil {
		t.Fatalf("failed to create key with lease %v", err)
	}
}

func TestLeaseRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clus.RandClient()

	kv := clus.RandClient()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.Revoke(context.Background(), clientv3.LeaseID(resp.ID))
	if err != nil {
		t.Errorf("failed to revoke lease %v", err)
	}

	_, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(resp.ID))
	if err != rpctypes.ErrLeaseNotFound {
		t.Fatalf("err = %v, want %v", err, rpctypes.ErrLeaseNotFound)
	}
}

func TestLeaseKeepAliveOnce(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clus.RandClient()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	_, err = lapi.KeepAliveOnce(context.Background(), resp.ID)
	if err != nil {
		t.Errorf("failed to keepalive lease %v", err)
	}

	_, err = lapi.KeepAliveOnce(context.Background(), clientv3.LeaseID(0))
	if err != rpctypes.ErrLeaseNotFound {
		t.Errorf("expected %v, got %v", rpctypes.ErrLeaseNotFound, err)
	}
}

func TestLeaseKeepAlive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	lapi := clus.Client(0)
	clus.TakeClient(0)

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp, ok := <-rc
	if !ok {
		t.Errorf("chan is closed, want not closed")
	}

	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok = <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}

func TestLeaseKeepAliveOneSecond(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	resp, err := cli.Grant(context.Background(), 1)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	for i := 0; i < 3; i++ {
		if _, ok := <-rc; !ok {
			t.Errorf("chan is closed, want not closed")
		}
	}
}

// TODO: add a client that can connect to all the members of cluster via unix sock.
// TODO: test handle more complicated failures.
func TestLeaseKeepAliveHandleFailure(t *testing.T) {
	t.Skip("test it when we have a cluster client")

	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// TODO: change this line to get a cluster client
	lapi := clus.RandClient()

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	rc, kerr := lapi.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Errorf("failed to keepalive lease %v", kerr)
	}

	kresp := <-rc
	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// restart the connected member.
	clus.Members[0].Stop(t)

	select {
	case <-rc:
		t.Fatalf("unexpected keepalive")
	case <-time.After(10*time.Second/3 + 1):
	}

	// recover the member.
	clus.Members[0].Restart(t)

	kresp = <-rc
	if kresp.ID != resp.ID {
		t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	lapi.Close()

	_, ok := <-rc
	if ok {
		t.Errorf("chan is not closed, want lease Close() closes chan")
	}
}

type leaseCh struct {
|
||||
lid clientv3.LeaseID
|
||||
ch <-chan *clientv3.LeaseKeepAliveResponse
|
||||
}
|
||||
|
||||
// TestLeaseKeepAliveNotFound ensures a revoked lease won't stop other keep alives
|
||||
func TestLeaseKeepAliveNotFound(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.RandClient()
|
||||
lchs := []leaseCh{}
|
||||
for i := 0; i < 3; i++ {
|
||||
resp, rerr := cli.Grant(context.TODO(), 5)
|
||||
if rerr != nil {
|
||||
t.Fatal(rerr)
|
||||
}
|
||||
kach, kaerr := cli.KeepAlive(context.Background(), resp.ID)
|
||||
if kaerr != nil {
|
||||
t.Fatal(kaerr)
|
||||
}
|
||||
lchs = append(lchs, leaseCh{resp.ID, kach})
|
||||
}
|
||||
|
||||
if _, err := cli.Revoke(context.TODO(), lchs[1].lid); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
<-lchs[0].ch
|
||||
if _, ok := <-lchs[0].ch; !ok {
|
||||
t.Fatalf("closed keepalive on wrong lease")
|
||||
}
|
||||
|
||||
timec := time.After(5 * time.Second)
|
||||
for range lchs[1].ch {
|
||||
select {
|
||||
case <-timec:
|
||||
t.Fatalf("revoke did not close keep alive")
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseGrantErrConnClosed(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.TakeClient(0)
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
defer close(donec)
|
||||
_, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil && err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("le.Grant took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseGrantNewAfterClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
clus.TakeClient(0)
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
if _, err := cli.Grant(context.TODO(), 5); err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
close(donec)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("le.Grant took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeaseRevokeNewAfterClose(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
|
||||
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
cli := clus.Client(0)
|
||||
resp, err := cli.Grant(context.TODO(), 5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
leaseID := resp.ID
|
||||
|
||||
clus.TakeClient(0)
|
||||
if err := cli.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
donec := make(chan struct{})
|
||||
go func() {
|
||||
if _, err := cli.Revoke(context.TODO(), leaseID); err != grpc.ErrClientConnClosing {
|
||||
t.Fatalf("expected %v, got %v", grpc.ErrClientConnClosing, err)
|
||||
}
|
||||
close(donec)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("le.Revoke took too long")
|
||||
case <-donec:
|
||||
}
|
||||
}
|
||||

// TestLeaseKeepAliveCloseAfterDisconnectRevoke ensures the keep alive channel is closed
// following a disconnection, lease revoke, then reconnect.
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	// setup lease and do a keepalive
	resp, err := cli.Grant(context.Background(), 10)
	if err != nil {
		t.Fatal(err)
	}
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Fatal(kerr)
	}
	kresp := <-rc
	if kresp.ID != resp.ID {
		t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// keep client disconnected
	clus.Members[0].Stop(t)
	time.Sleep(time.Second)
	clus.WaitLeader(t)

	if _, err := clus.Client(1).Revoke(context.TODO(), resp.ID); err != nil {
		t.Fatal(err)
	}

	clus.Members[0].Restart(t)

	// some keep-alives may still be buffered; drain until close
	timer := time.After(time.Duration(kresp.TTL) * time.Second)
	for kresp != nil {
		select {
		case kresp = <-rc:
		case <-timer:
			t.Fatalf("keepalive channel did not close")
		}
	}
}

// TestLeaseKeepAliveInitTimeout ensures the keep alive channel closes if
// the initial keep alive request never gets a response.
func TestLeaseKeepAliveInitTimeout(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	// setup lease and do a keepalive
	resp, err := cli.Grant(context.Background(), 5)
	if err != nil {
		t.Fatal(err)
	}
	// keep client disconnected
	clus.Members[0].Stop(t)
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Fatal(kerr)
	}
	select {
	case ka, ok := <-rc:
		if ok {
			t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
		}
	case <-time.After(10 * time.Second):
		t.Fatalf("keepalive channel did not close")
	}

	clus.Members[0].Restart(t)
}

// TestLeaseKeepAliveTTLTimeout ensures the keep alive channel closes if
// a keep alive request after the first never gets a response.
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.Client(0)

	// setup lease and do a keepalive
	resp, err := cli.Grant(context.Background(), 5)
	if err != nil {
		t.Fatal(err)
	}
	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
	if kerr != nil {
		t.Fatal(kerr)
	}
	if kresp := <-rc; kresp.ID != resp.ID {
		t.Fatalf("ID = %x, want %x", kresp.ID, resp.ID)
	}

	// keep client disconnected
	clus.Members[0].Stop(t)
	select {
	case ka, ok := <-rc:
		if ok {
			t.Fatalf("unexpected keepalive %v, expected closed channel", ka)
		}
	case <-time.After(10 * time.Second):
		t.Fatalf("keepalive channel did not close")
	}

	clus.Members[0].Restart(t)
}

func TestLeaseTimeToLive(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	c := clus.RandClient()
	lapi := c

	resp, err := lapi.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}

	kv := clus.RandClient()
	keys := []string{"foo1", "foo2"}
	for i := range keys {
		if _, err = kv.Put(context.TODO(), keys[i], "bar", clientv3.WithLease(resp.ID)); err != nil {
			t.Fatal(err)
		}
	}

	// linearized read to ensure Puts propagated to server backing lapi
	if _, err := c.Get(context.TODO(), "abc"); err != nil {
		t.Fatal(err)
	}

	lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys())
	if lerr != nil {
		t.Fatal(lerr)
	}
	if lresp.ID != resp.ID {
		t.Fatalf("leaseID expected %d, got %d", resp.ID, lresp.ID)
	}
	if lresp.GrantedTTL != int64(10) {
		t.Fatalf("GrantedTTL expected %d, got %d", 10, lresp.GrantedTTL)
	}
	if lresp.TTL == 0 || lresp.TTL > lresp.GrantedTTL {
		t.Fatalf("unexpected TTL %d (granted %d)", lresp.TTL, lresp.GrantedTTL)
	}
	ks := make([]string, len(lresp.Keys))
	for i := range lresp.Keys {
		ks[i] = string(lresp.Keys[i])
	}
	sort.Strings(ks)
	if !reflect.DeepEqual(ks, keys) {
		t.Fatalf("keys expected %v, got %v", keys, ks)
	}

	lresp, lerr = lapi.TimeToLive(context.Background(), resp.ID)
	if lerr != nil {
		t.Fatal(lerr)
	}
	if len(lresp.Keys) != 0 {
		t.Fatalf("unexpected keys %+v", lresp.Keys)
	}
}

func TestLeaseTimeToLiveLeaseNotFound(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cli := clus.RandClient()
	resp, err := cli.Grant(context.Background(), 10)
	if err != nil {
		t.Errorf("failed to create lease %v", err)
	}
	_, err = cli.Revoke(context.Background(), resp.ID)
	if err != nil {
		t.Errorf("failed to Revoke lease %v", err)
	}

	lresp, err := cli.TimeToLive(context.Background(), resp.ID)
	// TimeToLive() doesn't return a LeaseNotFound error;
	// it returns a response with the TTL set to -1.
	if err != nil {
		t.Fatalf("expected err to be nil")
	}
	if lresp == nil {
		t.Fatalf("expected lresp not to be nil")
	}
	if lresp.ResponseHeader == nil {
		t.Fatalf("expected ResponseHeader not to be nil")
	}
	if lresp.ID != resp.ID {
		t.Fatalf("expected Lease ID %v, but got %v", resp.ID, lresp.ID)
	}
	if lresp.TTL != -1 {
		t.Fatalf("expected TTL %v, but got %v", -1, lresp.TTL)
	}
}

// TestLeaseRenewLostQuorum ensures keepalives work after losing quorum
// for a while.
func TestLeaseRenewLostQuorum(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	cli := clus.Client(0)
	r, err := cli.Grant(context.TODO(), 4)
	if err != nil {
		t.Fatal(err)
	}

	kctx, kcancel := context.WithCancel(context.Background())
	defer kcancel()
	ka, err := cli.KeepAlive(kctx, r.ID)
	if err != nil {
		t.Fatal(err)
	}
	// consume first keepalive so next message sends when cluster is down
	<-ka
	lastKa := time.Now()

	// force keepalive stream message to timeout
	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	// Use TTL-2 since the client closes the keepalive channel if no
	// keepalive arrives before the lease deadline; the client will
	// try to resend a keepalive after TTL/3 seconds, so for a TTL of 4,
	// sleeping for 2s should be sufficient time for issuing a retry.
	// The cluster has two seconds to recover and reply to the keepalive.
	time.Sleep(time.Duration(r.TTL-2) * time.Second)
	clus.Members[1].Restart(t)
	clus.Members[2].Restart(t)

	if time.Since(lastKa) > time.Duration(r.TTL)*time.Second {
		t.Skip("waited too long for server stop and restart")
	}

	select {
	case _, ok := <-ka:
		if !ok {
			t.Fatalf("keepalive closed")
		}
	case <-time.After(time.Duration(r.TTL) * time.Second):
		t.Fatalf("timed out waiting for keepalive")
	}
}

func TestLeaseKeepAliveLoopExit(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	ctx := context.Background()
	cli := clus.Client(0)
	clus.TakeClient(0)

	resp, err := cli.Grant(ctx, 5)
	if err != nil {
		t.Fatal(err)
	}
	cli.Close()

	_, err = cli.KeepAlive(ctx, resp.ID)
	if _, ok := err.(clientv3.ErrKeepAliveHalted); !ok {
		t.Fatalf("expected %T, got %v(%T)", clientv3.ErrKeepAliveHalted{}, err, err)
	}
}

// TestV3LeaseFailureOverlap issues Grant and Keepalive requests to a cluster
// before, during, and after quorum loss to confirm Grant/Keepalive tolerates
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	numReqs := 5
	cli := clus.Client(0)

	// bring up a session, tear it down
	updown := func(i int) error {
		sess, err := concurrency.NewSession(cli)
		if err != nil {
			return err
		}
		ch := make(chan struct{})
		go func() {
			defer close(ch)
			sess.Close()
		}()
		select {
		case <-ch:
		case <-time.After(time.Minute / 4):
			t.Fatalf("timeout %d", i)
		}
		return nil
	}

	var wg sync.WaitGroup
	mkReqs := func(n int) {
		wg.Add(numReqs)
		for i := 0; i < numReqs; i++ {
			go func() {
				defer wg.Done()
				err := updown(n)
				if err == nil || err == rpctypes.ErrTimeoutDueToConnectionLost {
					return
				}
				t.Fatal(err)
			}()
		}
	}

	mkReqs(1)
	clus.Members[1].Stop(t)
	mkReqs(2)
	time.Sleep(time.Second)
	mkReqs(3)
	clus.Members[1].Restart(t)
	mkReqs(4)
	wg.Wait()
}

// TestLeaseWithRequireLeader checks keep-alive channel close when no leader.
func TestLeaseWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	c := clus.Client(0)
	lid1, err1 := c.Grant(context.TODO(), 60)
	if err1 != nil {
		t.Fatal(err1)
	}
	lid2, err2 := c.Grant(context.TODO(), 60)
	if err2 != nil {
		t.Fatal(err2)
	}
	// kaReqLeader closes if the leader is lost
	kaReqLeader, kerr1 := c.KeepAlive(clientv3.WithRequireLeader(context.TODO()), lid1.ID)
	if kerr1 != nil {
		t.Fatal(kerr1)
	}
	// kaWait will wait even if the leader is lost
	kaWait, kerr2 := c.KeepAlive(context.TODO(), lid2.ID)
	if kerr2 != nil {
		t.Fatal(kerr2)
	}

	select {
	case <-kaReqLeader:
	case <-time.After(5 * time.Second):
		t.Fatalf("require leader first keep-alive timed out")
	}
	select {
	case <-kaWait:
	case <-time.After(5 * time.Second):
		t.Fatalf("leader not required first keep-alive timed out")
	}

	clus.Members[1].Stop(t)
	// kaReqLeader may issue multiple requests while waiting for the first
	// response from proxy server; drain any stray keepalive responses
	time.Sleep(100 * time.Millisecond)
	for len(kaReqLeader) > 0 {
		<-kaReqLeader
	}

	select {
	case resp, ok := <-kaReqLeader:
		if ok {
			t.Fatalf("expected closed require leader, got response %+v", resp)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("keepalive with require leader took too long to close")
	}
	select {
	case _, ok := <-kaWait:
		if !ok {
			t.Fatalf("got closed channel with no require leader, expected non-closed")
		}
	case <-time.After(10 * time.Millisecond):
		// wait some to detect any closes happening soon after kaReqLeader closing
	}
}
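
The tests above all exercise the same client-side pattern: grant a lease, then consume the keep-alive channel until it closes. As a minimal standalone sketch of that pattern (not part of this commit; the endpoint address is a placeholder), a caller would look roughly like this:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// grant a 10-second lease and keep it alive until the channel closes
	resp, err := cli.Grant(context.Background(), 10)
	if err != nil {
		log.Fatal(err)
	}
	ch, err := cli.KeepAlive(context.Background(), resp.ID)
	if err != nil {
		log.Fatal(err)
	}
	for ka := range ch {
		log.Printf("lease %x refreshed, TTL %d", ka.ID, ka.TTL)
	}
	// a closed channel means the lease expired, was revoked, or the client closed
	log.Println("keepalive channel closed")
}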

21 vendor/github.com/coreos/etcd/clientv3.old/integration/logger_test.go generated vendored Normal file
@ -0,0 +1,21 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import "github.com/coreos/pkg/capnslog"

func init() {
	capnslog.SetGlobalLogLevel(capnslog.INFO)
}

20 vendor/github.com/coreos/etcd/clientv3.old/integration/main_test.go generated vendored Normal file
@ -0,0 +1,20 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package integration

import (
	"os"
	"testing"

	"github.com/coreos/etcd/pkg/testutil"
)

func TestMain(m *testing.M) {
	v := m.Run()
	if v == 0 && testutil.CheckLeakedGoroutine() {
		os.Exit(1)
	}
	os.Exit(v)
}

177 vendor/github.com/coreos/etcd/clientv3.old/integration/metrics_test.go generated vendored Normal file
@ -0,0 +1,177 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"bufio"
	"io"
	"net"
	"net/http"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"github.com/coreos/etcd/pkg/transport"

	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func TestV3ClientMetrics(t *testing.T) {
	defer testutil.AfterTest(t)

	var (
		addr string = "localhost:27989"
		ln   net.Listener
		err  error
	)

	// listen for all prometheus metrics
	donec := make(chan struct{})
	go func() {
		defer close(donec)

		srv := &http.Server{Handler: prometheus.Handler()}
		srv.SetKeepAlivesEnabled(false)

		ln, err = transport.NewUnixListener(addr)
		if err != nil {
			t.Fatalf("Error: %v occurred while listening on addr: %v", err, addr)
		}

		err = srv.Serve(ln)
		if err != nil && !transport.IsClosedConnError(err) {
			t.Fatalf("Err serving http requests: %v", err)
		}
	}()

	url := "unix://" + addr + "/metrics"

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cfg := clientv3.Config{
		Endpoints: []string{clus.Members[0].GRPCAddr()},
		DialOptions: []grpc.DialOption{
			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
		},
	}
	cli, cerr := clientv3.New(cfg)
	if cerr != nil {
		t.Fatal(cerr)
	}
	defer cli.Close()

	wc := cli.Watch(context.Background(), "foo")

	wBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")

	pBefore := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")

	_, err = cli.Put(context.Background(), "foo", "bar")
	if err != nil {
		t.Errorf("Error putting value in key store")
	}

	pAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_started_total", "Put", "unary")
	if pBefore+1 != pAfter {
		t.Errorf("grpc_client_started_total expected %d, got %d", 1, pAfter-pBefore)
	}

	// consume watch response
	select {
	case <-wc:
	case <-time.After(10 * time.Second):
		t.Error("Timeout occurred for getting watch response")
	}

	wAfter := sumCountersForMetricAndLabels(t, url, "grpc_client_msg_received_total", "Watch", "bidi_stream")
	if wBefore+1 != wAfter {
		t.Errorf("grpc_client_msg_received_total expected %d, got %d", 1, wAfter-wBefore)
	}

	ln.Close()
	<-donec
}

func sumCountersForMetricAndLabels(t *testing.T, url string, metricName string, matchingLabelValues ...string) int {
	count := 0
	for _, line := range getHTTPBodyAsLines(t, url) {
		ok := true
		if !strings.HasPrefix(line, metricName) {
			continue
		}

		for _, labelValue := range matchingLabelValues {
			if !strings.Contains(line, `"`+labelValue+`"`) {
				ok = false
				break
			}
		}

		if !ok {
			continue
		}

		valueString := line[strings.LastIndex(line, " ")+1 : len(line)-1]
		valueFloat, err := strconv.ParseFloat(valueString, 32)
		if err != nil {
			t.Fatalf("failed parsing value for line: %v and matchingLabelValues: %v", line, matchingLabelValues)
		}
		count += int(valueFloat)
	}
	return count
}

func getHTTPBodyAsLines(t *testing.T, url string) []string {
	cfgtls := transport.TLSInfo{}
	tr, err := transport.NewTransport(cfgtls, time.Second)
	if err != nil {
		t.Fatalf("Error getting transport: %v", err)
	}

	tr.MaxIdleConns = -1
	tr.DisableKeepAlives = true

	cli := &http.Client{Transport: tr}

	resp, err := cli.Get(url)
	if err != nil {
		t.Fatalf("Error fetching: %v", err)
	}

	reader := bufio.NewReader(resp.Body)
	lines := []string{}
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			} else {
				t.Fatalf("error reading: %v", err)
			}
		}
		lines = append(lines, line)
	}
	resp.Body.Close()
	return lines
}

125 vendor/github.com/coreos/etcd/clientv3.old/integration/mirror_test.go generated vendored Normal file
@ -0,0 +1,125 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"fmt"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3/mirror"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/net/context"
)

func TestMirrorSync(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	c := clus.Client(0)
	_, err := c.KV.Put(context.TODO(), "foo", "bar")
	if err != nil {
		t.Fatal(err)
	}

	syncer := mirror.NewSyncer(c, "", 0)
	gch, ech := syncer.SyncBase(context.TODO())
	wkvs := []*mvccpb.KeyValue{{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}

	for g := range gch {
		if !reflect.DeepEqual(g.Kvs, wkvs) {
			t.Fatalf("kv = %v, want %v", g.Kvs, wkvs)
		}
	}

	for e := range ech {
		t.Fatalf("unexpected error %v", e)
	}

	wch := syncer.SyncUpdates(context.TODO())

	_, err = c.KV.Put(context.TODO(), "foo", "bar")
	if err != nil {
		t.Fatal(err)
	}

	select {
	case r := <-wch:
		wkv := &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 3, Version: 2}
		if !reflect.DeepEqual(r.Events[0].Kv, wkv) {
			t.Fatalf("kv = %v, want %v", r.Events[0].Kv, wkv)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive update in one second")
	}
}

func TestMirrorSyncBase(t *testing.T) {
	cluster := integration.NewClusterV3(nil, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(nil)

	cli := cluster.Client(0)
	ctx := context.TODO()

	keyCh := make(chan string)
	var wg sync.WaitGroup

	for i := 0; i < 50; i++ {
		wg.Add(1)

		go func() {
			defer wg.Done()

			for key := range keyCh {
				if _, err := cli.Put(ctx, key, "test"); err != nil {
					t.Fatal(err)
				}
			}
		}()
	}

	for i := 0; i < 2000; i++ {
		keyCh <- fmt.Sprintf("test%d", i)
	}

	close(keyCh)
	wg.Wait()

	syncer := mirror.NewSyncer(cli, "test", 0)
	respCh, errCh := syncer.SyncBase(ctx)

	count := 0

	for resp := range respCh {
		count = count + len(resp.Kvs)
		if !resp.More {
			break
		}
	}

	for err := range errCh {
		t.Fatalf("unexpected error %v", err)
	}

	if count != 2000 {
		t.Errorf("unexpected kv count: %d", count)
	}
}

86 vendor/github.com/coreos/etcd/clientv3.old/integration/namespace_test.go generated vendored Normal file
@ -0,0 +1,86 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"context"
	"reflect"
	"testing"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/mvcc/mvccpb"
	"github.com/coreos/etcd/pkg/testutil"
)

func TestNamespacePutGet(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	c := clus.Client(0)
	nsKV := namespace.NewKV(c.KV, "foo/")

	if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil {
		t.Fatal(err)
	}
	resp, err := nsKV.Get(context.TODO(), "abc")
	if err != nil {
		t.Fatal(err)
	}
	if string(resp.Kvs[0].Key) != "abc" {
		t.Errorf("expected key=%q, got key=%q", "abc", resp.Kvs[0].Key)
	}

	resp, err = c.Get(context.TODO(), "foo/abc")
	if err != nil {
		t.Fatal(err)
	}
	if string(resp.Kvs[0].Value) != "bar" {
		t.Errorf("expected value=%q, got value=%q", "bar", resp.Kvs[0].Value)
	}
}

func TestNamespaceWatch(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	c := clus.Client(0)
	nsKV := namespace.NewKV(c.KV, "foo/")
	nsWatcher := namespace.NewWatcher(c.Watcher, "foo/")

	if _, err := nsKV.Put(context.TODO(), "abc", "bar"); err != nil {
		t.Fatal(err)
	}

	nsWch := nsWatcher.Watch(context.TODO(), "abc", clientv3.WithRev(1))
	wkv := &mvccpb.KeyValue{Key: []byte("abc"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}
	if wr := <-nsWch; len(wr.Events) != 1 || !reflect.DeepEqual(wr.Events[0].Kv, wkv) {
		t.Errorf("expected namespaced event %+v, got %+v", wkv, wr.Events[0].Kv)
	}

	wch := c.Watch(context.TODO(), "foo/abc", clientv3.WithRev(1))
	wkv = &mvccpb.KeyValue{Key: []byte("foo/abc"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}
	if wr := <-wch; len(wr.Events) != 1 || !reflect.DeepEqual(wr.Events[0].Kv, wkv) {
		t.Errorf("expected unnamespaced event %+v, got %+v", wkv, wr)
	}

	// let the client Close tear down the namespaced watch
	c.Watcher = nsWatcher
}
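
The namespace wrappers make a client operate transparently under a key prefix. A minimal sketch (not part of this commit; the endpoint and the "app1/" prefix are placeholders):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// every op through nsKV is silently prefixed with "app1/"
	nsKV := namespace.NewKV(cli.KV, "app1/")
	if _, err := nsKV.Put(context.Background(), "config", "v1"); err != nil {
		log.Fatal(err)
	}
	// reading through the raw client shows the prefixed key
	resp, err := cli.Get(context.Background(), "app1/config")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("raw key holds: %s", resp.Kvs[0].Value)
}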

43 vendor/github.com/coreos/etcd/clientv3.old/integration/role_test.go generated vendored Normal file
@ -0,0 +1,43 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"testing"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/net/context"
)

func TestRoleError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	authapi := clus.RandClient()

	_, err := authapi.RoleAdd(context.TODO(), "test-role")
	if err != nil {
		t.Fatal(err)
	}

	_, err = authapi.RoleAdd(context.TODO(), "test-role")
	if err != rpctypes.ErrRoleAlreadyExist {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRoleAlreadyExist, err)
	}
}

227 vendor/github.com/coreos/etcd/clientv3.old/integration/txn_test.go generated vendored Normal file
@ -0,0 +1,227 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"fmt"
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/embed"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/net/context"
)

func TestTxnError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	_, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar1"), clientv3.OpPut("foo", "bar2")).Commit()
	if err != rpctypes.ErrDuplicateKey {
		t.Fatalf("expected %v, got %v", rpctypes.ErrDuplicateKey, err)
	}

	ops := make([]clientv3.Op, int(embed.DefaultMaxTxnOps+10))
	for i := range ops {
		ops[i] = clientv3.OpPut(fmt.Sprintf("foo%d", i), "")
	}
	_, err = kv.Txn(ctx).Then(ops...).Commit()
	if err != rpctypes.ErrTooManyOps {
		t.Fatalf("expected %v, got %v", rpctypes.ErrTooManyOps, err)
	}
}

func TestTxnWriteFail(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.Client(0)

	clus.Members[0].Stop(t)

	txnc, getc := make(chan struct{}), make(chan struct{})
	go func() {
		ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
		defer cancel()
		resp, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit()
		if err == nil {
			t.Fatalf("expected error, got response %v", resp)
		}
		close(txnc)
	}()

	go func() {
		defer close(getc)
		select {
		case <-time.After(5 * time.Second):
			t.Fatalf("timed out waiting for txn fail")
		case <-txnc:
		}
		// and ensure the put didn't take
		gresp, gerr := clus.Client(1).Get(context.TODO(), "foo")
		if gerr != nil {
			t.Fatal(gerr)
		}
		if len(gresp.Kvs) != 0 {
			t.Fatalf("expected no keys, got %v", gresp.Kvs)
		}
	}()

	select {
	case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()):
		t.Fatalf("timed out waiting for get")
	case <-getc:
	}

	// reconnect so terminate doesn't complain about double-close
	clus.Members[0].Restart(t)
}

func TestTxnReadRetry(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.Client(0)

	thenOps := [][]clientv3.Op{
		{clientv3.OpGet("foo")},
		{clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpGet("foo")}, nil)},
		{clientv3.OpTxn(nil, nil, nil)},
		{},
	}
	for i := range thenOps {
		clus.Members[0].Stop(t)
		<-clus.Members[0].StopNotify()

		donec := make(chan struct{})
		go func() {
			_, err := kv.Txn(context.TODO()).Then(thenOps[i]...).Commit()
			if err != nil {
				t.Fatalf("expected response, got error %v", err)
			}
			donec <- struct{}{}
		}()
		// wait for txn to fail on disconnect
		time.Sleep(100 * time.Millisecond)

		// restart node; client should resume
		clus.Members[0].Restart(t)
		select {
		case <-donec:
		case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()):
			t.Fatalf("waited too long")
		}
	}
}

func TestTxnSuccess(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.Client(0)
	ctx := context.TODO()

	_, err := kv.Txn(ctx).Then(clientv3.OpPut("foo", "bar")).Commit()
	if err != nil {
		t.Fatal(err)
	}

	resp, err := kv.Get(ctx, "foo")
	if err != nil {
		t.Fatal(err)
	}
	if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" {
		t.Fatalf("unexpected Get response %v", resp)
	}
}

func TestTxnCompareRange(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := clus.Client(0)
	fooResp, err := kv.Put(context.TODO(), "foo/", "bar")
	if err != nil {
		t.Fatal(err)
	}
	if _, err = kv.Put(context.TODO(), "foo/a", "baz"); err != nil {
		t.Fatal(err)
	}
	tresp, terr := kv.Txn(context.TODO()).If(
		clientv3.Compare(
			clientv3.CreateRevision("foo/"), "=", fooResp.Header.Revision).
			WithPrefix(),
	).Commit()
	if terr != nil {
		t.Fatal(terr)
	}
	if tresp.Succeeded {
		t.Fatal("expected prefix compare to evaluate to false, got true")
	}
}

func TestTxnNested(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.Client(0)

	tresp, err := kv.Txn(context.TODO()).
		If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
		Then(
			clientv3.OpPut("foo", "bar"),
			clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpPut("abc", "123")}, nil)).
		Else(clientv3.OpPut("foo", "baz")).Commit()
	if err != nil {
		t.Fatal(err)
	}
	if len(tresp.Responses) != 2 {
		t.Errorf("expected 2 top-level txn responses, got %+v", tresp.Responses)
	}

	// check txn writes were applied
	resp, err := kv.Get(context.TODO(), "foo")
	if err != nil {
		t.Fatal(err)
	}
	if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" {
		t.Errorf("unexpected Get response %+v", resp)
	}
	resp, err = kv.Get(context.TODO(), "abc")
	if err != nil {
		t.Fatal(err)
	}
	if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "123" {
		t.Errorf("unexpected Get response %+v", resp)
	}
}
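
The transaction tests above all build on the same If/Then/Else compare-and-swap primitive. A minimal sketch of that primitive (not part of this commit; the endpoint is a placeholder):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// create "foo" only if it does not exist yet (version 0 means "no key")
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
		Then(clientv3.OpPut("foo", "bar")).
		Else(clientv3.OpGet("foo")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("txn succeeded: %v", resp.Succeeded)
}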

106 vendor/github.com/coreos/etcd/clientv3.old/integration/user_test.go generated vendored Normal file
@ -0,0 +1,106 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package integration

import (
	"testing"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/net/context"
)

func TestUserError(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	authapi := clus.RandClient()

	_, err := authapi.UserAdd(context.TODO(), "foo", "bar")
	if err != nil {
		t.Fatal(err)
	}

	_, err = authapi.UserAdd(context.TODO(), "foo", "bar")
	if err != rpctypes.ErrUserAlreadyExist {
		t.Fatalf("expected %v, got %v", rpctypes.ErrUserAlreadyExist, err)
	}

	_, err = authapi.UserDelete(context.TODO(), "not-exist-user")
	if err != rpctypes.ErrUserNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
	}

	_, err = authapi.UserGrantRole(context.TODO(), "foo", "test-role-does-not-exist")
	if err != rpctypes.ErrRoleNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrRoleNotFound, err)
	}
}

func TestUserErrorAuth(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	authapi := clus.RandClient()
	authSetupRoot(t, authapi.Auth)

	// un-authenticated client
	if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserNotFound {
		t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
	}

	// wrong id or password
	cfg := clientv3.Config{Endpoints: authapi.Endpoints()}
	cfg.Username, cfg.Password = "wrong-id", "123"
	if _, err := clientv3.New(cfg); err != rpctypes.ErrAuthFailed {
		t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
	}
	cfg.Username, cfg.Password = "root", "wrong-pass"
	if _, err := clientv3.New(cfg); err != rpctypes.ErrAuthFailed {
		t.Fatalf("expected %v, got %v", rpctypes.ErrAuthFailed, err)
	}

	cfg.Username, cfg.Password = "root", "123"
	authed, err := clientv3.New(cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer authed.Close()

	if _, err := authed.UserList(context.TODO()); err != nil {
		t.Fatal(err)
	}
}

func authSetupRoot(t *testing.T, auth clientv3.Auth) {
	if _, err := auth.UserAdd(context.TODO(), "root", "123"); err != nil {
		t.Fatal(err)
	}
	if _, err := auth.RoleAdd(context.TODO(), "root"); err != nil {
		t.Fatal(err)
	}
	if _, err := auth.UserGrantRole(context.TODO(), "root", "root"); err != nil {
		t.Fatal(err)
	}
	if _, err := auth.AuthEnable(context.TODO()); err != nil {
		t.Fatal(err)
	}
}

1059 vendor/github.com/coreos/etcd/clientv3.old/integration/watch_test.go generated vendored Normal file
File diff suppressed because it is too large

182 vendor/github.com/coreos/etcd/clientv3.old/kv.go generated vendored Normal file
@ -0,0 +1,182 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

type (
	CompactResponse pb.CompactionResponse
	PutResponse     pb.PutResponse
	GetResponse     pb.RangeResponse
	DeleteResponse  pb.DeleteRangeResponse
	TxnResponse     pb.TxnResponse
)

type KV interface {
	// Put puts a key-value pair into etcd.
	// Note that key,value can be plain bytes array and string is
	// an immutable representation of that bytes array.
	// To get a string of bytes, do string([]byte{0x10, 0x20}).
	Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)

	// Get retrieves keys.
	// By default, Get will return the value for "key", if any.
	// When passed WithRange(end), Get will return the keys in the range [key, end).
	// When passed WithFromKey(), Get returns keys greater than or equal to key.
	// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
	// if the required revision is compacted, the request will fail with ErrCompacted.
	// When passed WithLimit(limit), the number of returned keys is bounded by limit.
	// When passed WithSort(), the keys will be sorted.
	Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)

	// Delete deletes a key, or optionally using WithRange(end), [key, end).
	Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)

	// Compact compacts etcd KV history before the given rev.
	Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)

	// Do applies a single Op on KV without a transaction.
	// Do is useful when creating arbitrary operations to be issued at a
	// later time; the user can range over the operations, calling Do to
	// execute them. Get/Put/Delete, on the other hand, are best suited
	// for when the operation should be issued at the time of declaration.
	Do(ctx context.Context, op Op) (OpResponse, error)

	// Txn creates a transaction.
	Txn(ctx context.Context) Txn
}

type OpResponse struct {
	put *PutResponse
	get *GetResponse
	del *DeleteResponse
	txn *TxnResponse
}

func (op OpResponse) Put() *PutResponse    { return op.put }
func (op OpResponse) Get() *GetResponse    { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse    { return op.txn }

func (resp *PutResponse) ToOpResponse() OpResponse {
	return OpResponse{put: resp}
}
func (resp *GetResponse) ToOpResponse() OpResponse {
	return OpResponse{get: resp}
}
func (resp *DeleteResponse) ToOpResponse() OpResponse {
	return OpResponse{del: resp}
}
func (resp *TxnResponse) ToOpResponse() OpResponse {
	return OpResponse{txn: resp}
}

type kv struct {
	remote pb.KVClient
}

func NewKV(c *Client) KV {
	return &kv{remote: RetryKVClient(c)}
}

func NewKVFromKVClient(remote pb.KVClient) KV {
	return &kv{remote: remote}
}

func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
	r, err := kv.Do(ctx, OpPut(key, val, opts...))
	return r.put, toErr(ctx, err)
}

func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
	r, err := kv.Do(ctx, OpGet(key, opts...))
	return r.get, toErr(ctx, err)
}

func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
	r, err := kv.Do(ctx, OpDelete(key, opts...))
	return r.del, toErr(ctx, err)
}

func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest())
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*CompactResponse)(resp), err
}

func (kv *kv) Txn(ctx context.Context) Txn {
	return &txn{
		kv:  kv,
		ctx: ctx,
	}
}

func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
	for {
		resp, err := kv.do(ctx, op)
		if err == nil {
			return resp, nil
		}

		if isHaltErr(ctx, err) {
			return resp, toErr(ctx, err)
		}
		// do not retry on modifications
		if op.isWrite() {
			return resp, toErr(ctx, err)
		}
	}
}

func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
	var err error
	switch op.t {
	case tRange:
		var resp *pb.RangeResponse
		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false))
		if err == nil {
			return OpResponse{get: (*GetResponse)(resp)}, nil
		}
	case tPut:
		var resp *pb.PutResponse
		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
		resp, err = kv.remote.Put(ctx, r)
		if err == nil {
			return OpResponse{put: (*PutResponse)(resp)}, nil
		}
	case tDeleteRange:
		var resp *pb.DeleteRangeResponse
		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
		resp, err = kv.remote.DeleteRange(ctx, r)
		if err == nil {
			return OpResponse{del: (*DeleteResponse)(resp)}, nil
		}
	case tTxn:
		var resp *pb.TxnResponse
		resp, err = kv.remote.Txn(ctx, op.toTxnRequest())
		if err == nil {
			return OpResponse{txn: (*TxnResponse)(resp)}, nil
		}
	default:
		panic("Unknown op")
	}
	return OpResponse{}, err
}
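
As a quick orientation to the KV interface documented above, a minimal caller-side sketch (not part of this commit; the endpoint is a placeholder):

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// the client embeds KV, so Put/Get are available directly
	if _, err := cli.Put(ctx, "sample_key", "sample_value"); err != nil {
		log.Fatal(err)
	}
	resp, err := cli.Get(ctx, "sample_key")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		log.Printf("%s = %s", kv.Key, kv.Value)
	}
}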

547 vendor/github.com/coreos/etcd/clientv3.old/lease.go generated vendored Normal file
@ -0,0 +1,547 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

type (
	LeaseRevokeResponse pb.LeaseRevokeResponse
	LeaseID             int64
)

// LeaseGrantResponse is used to convert the protobuf grant response.
type LeaseGrantResponse struct {
	*pb.ResponseHeader
	ID    LeaseID
	TTL   int64
	Error string
}

// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
type LeaseKeepAliveResponse struct {
	*pb.ResponseHeader
	ID  LeaseID
	TTL int64
}

// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response.
type LeaseTimeToLiveResponse struct {
	*pb.ResponseHeader
	ID LeaseID `json:"id"`

	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
	TTL int64 `json:"ttl"`

	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
	GrantedTTL int64 `json:"granted-ttl"`

	// Keys is the list of keys attached to this lease.
	Keys [][]byte `json:"keys"`
}

const (
	// defaultTTL is the assumed lease TTL used for the first keepalive
	// deadline before the actual TTL is known to the client.
	defaultTTL = 5 * time.Second
	// a small buffer to store unsent lease responses.
	leaseResponseChSize = 16
	// NoLease is a lease ID for the absence of a lease.
	NoLease LeaseID = 0

	// retryConnWait is how long to wait before retrying a request due to an error
	retryConnWait = 500 * time.Millisecond
)

// ErrKeepAliveHalted is returned if the client keep alive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
type ErrKeepAliveHalted struct {
	Reason error
}

func (e ErrKeepAliveHalted) Error() string {
	s := "etcdclient: leases keep alive halted"
	if e.Reason != nil {
		s += ": " + e.Reason.Error()
	}
	return s
}

type Lease interface {
	// Grant creates a new lease.
	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)

	// Revoke revokes the given lease.
	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)

	// TimeToLive retrieves the lease information of the given lease ID.
	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)

	// KeepAlive keeps the given lease alive forever.
	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)

	// KeepAliveOnce renews the lease once. In most of the cases, Keepalive
	// should be used instead of KeepAliveOnce.
	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)

	// Close releases all resources Lease keeps for efficient communication
	// with the etcd server.
	Close() error
}

type lessor struct {
	mu sync.Mutex // guards all fields

	// donec is closed and loopErr is set when recvKeepAliveLoop stops
	donec   chan struct{}
	loopErr error

	remote pb.LeaseClient

	stream       pb.Lease_LeaseKeepAliveClient
	streamCancel context.CancelFunc

	stopCtx    context.Context
	stopCancel context.CancelFunc

	keepAlives map[LeaseID]*keepAlive

	// firstKeepAliveTimeout is the timeout for the first keepalive request
	// before the actual TTL is known to the lease client
	firstKeepAliveTimeout time.Duration

	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
	firstKeepAliveOnce sync.Once
}

// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
	chs  []chan<- *LeaseKeepAliveResponse
	ctxs []context.Context
	// deadline is the time the keep alive channels close if no response
	deadline time.Time
	// nextKeepAlive is when to send the next keep alive message
	nextKeepAlive time.Time
	// donec is closed on lease revoke, expiration, or cancel.
	donec chan struct{}
}

func NewLease(c *Client) Lease {
	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c.cfg.DialTimeout+time.Second)
}

func NewLeaseFromLeaseClient(remote pb.LeaseClient, keepAliveTimeout time.Duration) Lease {
	l := &lessor{
		donec:                 make(chan struct{}),
		keepAlives:            make(map[LeaseID]*keepAlive),
		remote:                remote,
		firstKeepAliveTimeout: keepAliveTimeout,
	}
	if l.firstKeepAliveTimeout == time.Second {
		l.firstKeepAliveTimeout = defaultTTL
	}
	reqLeaderCtx := WithRequireLeader(context.Background())
	l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
	return l
}

func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
	for {
		r := &pb.LeaseGrantRequest{TTL: ttl}
		resp, err := l.remote.LeaseGrant(ctx, r)
		if err == nil {
			gresp := &LeaseGrantResponse{
				ResponseHeader: resp.GetHeader(),
				ID:             LeaseID(resp.ID),
				TTL:            resp.TTL,
				Error:          resp.Error,
			}
			return gresp, nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
	for {
		r := &pb.LeaseRevokeRequest{ID: int64(id)}
		resp, err := l.remote.LeaseRevoke(ctx, r)

		if err == nil {
			return (*LeaseRevokeResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
	for {
		r := toLeaseTimeToLiveRequest(id, opts...)
		resp, err := l.remote.LeaseTimeToLive(ctx, r, grpc.FailFast(false))
		if err == nil {
			gresp := &LeaseTimeToLiveResponse{
				ResponseHeader: resp.GetHeader(),
				ID:             LeaseID(resp.ID),
				TTL:            resp.TTL,
				GrantedTTL:     resp.GrantedTTL,
				Keys:           resp.Keys,
			}
			return gresp, nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
	ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)

	l.mu.Lock()
	// ensure that recvKeepAliveLoop is still running
	select {
	case <-l.donec:
		err := l.loopErr
		l.mu.Unlock()
		close(ch)
		return ch, ErrKeepAliveHalted{Reason: err}
	default:
	}
	ka, ok := l.keepAlives[id]
	if !ok {
		// create fresh keep alive
		ka = &keepAlive{
			chs:           []chan<- *LeaseKeepAliveResponse{ch},
			ctxs:          []context.Context{ctx},
			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
			nextKeepAlive: time.Now(),
			donec:         make(chan struct{}),
		}
		l.keepAlives[id] = ka
	} else {
		// add channel and context to existing keep alive
		ka.ctxs = append(ka.ctxs, ctx)
		ka.chs = append(ka.chs, ch)
	}
	l.mu.Unlock()

	go l.keepAliveCtxCloser(id, ctx, ka.donec)
	l.firstKeepAliveOnce.Do(func() {
		go l.recvKeepAliveLoop()
		go l.deadlineLoop()
	})

	return ch, nil
}

func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
	for {
		resp, err := l.keepAliveOnce(ctx, id)
		if err == nil {
			if resp.TTL <= 0 {
				err = rpctypes.ErrLeaseNotFound
			}
			return resp, err
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (l *lessor) Close() error {
	l.stopCancel()
	// close for synchronous teardown if stream goroutines never launched
	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
	<-l.donec
	return nil
}
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
|
||||
select {
|
||||
case <-donec:
|
||||
return
|
||||
case <-l.donec:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
ka, ok := l.keepAlives[id]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// close channel and remove context if still associated with keep alive
|
||||
for i, c := range ka.ctxs {
|
||||
if c == ctx {
|
||||
close(ka.chs[i])
|
||||
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
|
||||
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
// remove if no one more listeners
|
||||
if len(ka.chs) == 0 {
|
||||
delete(l.keepAlives, id)
|
||||
}
|
||||
}
|
||||
|
||||
// closeRequireLeader scans all keep alives for ctxs that have require leader
|
||||
// and closes the associated channels.
|
||||
func (l *lessor) closeRequireLeader() {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
for _, ka := range l.keepAlives {
|
||||
reqIdxs := 0
|
||||
// find all required leader channels, close, mark as nil
|
||||
for i, ctx := range ka.ctxs {
|
||||
md, ok := metadata.FromOutgoingContext(ctx)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
ks := md[rpctypes.MetadataRequireLeaderKey]
|
||||
if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
|
||||
continue
|
||||
}
|
||||
close(ka.chs[i])
|
||||
ka.chs[i] = nil
|
||||
reqIdxs++
|
||||
}
|
||||
if reqIdxs == 0 {
|
||||
continue
|
||||
}
|
||||
// remove all channels that required a leader from keepalive
|
||||
newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
|
||||
newCtxs := make([]context.Context, len(newChs))
|
||||
newIdx := 0
|
||||
for i := range ka.chs {
|
||||
if ka.chs[i] == nil {
|
||||
continue
|
||||
}
|
||||
newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx]
|
||||
newIdx++
|
||||
}
|
||||
ka.chs, ka.ctxs = newChs, newCtxs
|
||||
}
|
||||
}
|
||||
|
||||
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
|
||||
cctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
resp, rerr := stream.Recv()
|
||||
if rerr != nil {
|
||||
return nil, toErr(ctx, rerr)
|
||||
}
|
||||
|
||||
karesp := &LeaseKeepAliveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
}
|
||||
return karesp, nil
|
||||
}
|
||||
|
||||
func (l *lessor) recvKeepAliveLoop() (gerr error) {
|
||||
defer func() {
|
||||
l.mu.Lock()
|
||||
close(l.donec)
|
||||
l.loopErr = gerr
|
||||
for _, ka := range l.keepAlives {
|
||||
ka.close()
|
||||
}
|
||||
l.keepAlives = make(map[LeaseID]*keepAlive)
|
||||
l.mu.Unlock()
|
||||
}()
|
||||
|
||||
for {
|
||||
stream, err := l.resetRecv()
|
||||
if err != nil {
|
||||
if canceledByCaller(l.stopCtx, err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
for {
|
||||
resp, err := stream.Recv()
|
||||
|
||||
if err != nil {
|
||||
if canceledByCaller(l.stopCtx, err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
|
||||
l.closeRequireLeader()
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
l.recvKeepAlive(resp)
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(retryConnWait):
|
||||
continue
|
||||
case <-l.stopCtx.Done():
|
||||
return l.stopCtx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
|
||||
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
|
||||
sctx, cancel := context.WithCancel(l.stopCtx)
|
||||
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
if l.stream != nil && l.streamCancel != nil {
|
||||
l.streamCancel()
|
||||
}
|
||||
|
||||
l.streamCancel = cancel
|
||||
l.stream = stream
|
||||
|
||||
go l.sendKeepAliveLoop(stream)
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
|
||||
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
|
||||
karesp := &LeaseKeepAliveResponse{
|
||||
ResponseHeader: resp.GetHeader(),
|
||||
ID: LeaseID(resp.ID),
|
||||
TTL: resp.TTL,
|
||||
}
|
||||
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
ka, ok := l.keepAlives[karesp.ID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if karesp.TTL <= 0 {
|
||||
// lease expired; close all keep alive channels
|
||||
delete(l.keepAlives, karesp.ID)
|
||||
ka.close()
|
||||
return
|
||||
}
|
||||
|
||||
// send update to all channels
|
||||
nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
|
||||
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
|
||||
for _, ch := range ka.chs {
|
||||
select {
|
||||
case ch <- karesp:
|
||||
ka.nextKeepAlive = nextKeepAlive
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deadlineLoop reaps any keep alive channels that have not received a response
|
||||
// within the lease TTL
|
||||
func (l *lessor) deadlineLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case <-l.donec:
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
l.mu.Lock()
|
||||
for id, ka := range l.keepAlives {
|
||||
if ka.deadline.Before(now) {
|
||||
// waited too long for response; lease may be expired
|
||||
ka.close()
|
||||
delete(l.keepAlives, id)
|
||||
}
|
||||
}
|
||||
l.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
|
||||
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
|
||||
for {
|
||||
var tosend []LeaseID
|
||||
|
||||
now := time.Now()
|
||||
l.mu.Lock()
|
||||
for id, ka := range l.keepAlives {
|
||||
if ka.nextKeepAlive.Before(now) {
|
||||
tosend = append(tosend, id)
|
||||
}
|
||||
}
|
||||
l.mu.Unlock()
|
||||
|
||||
for _, id := range tosend {
|
||||
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
|
||||
if err := stream.Send(r); err != nil {
|
||||
// TODO do something with this error?
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
case <-stream.Context().Done():
|
||||
return
|
||||
case <-l.donec:
|
||||
return
|
||||
case <-l.stopCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ka *keepAlive) close() {
|
||||
close(ka.donec)
|
||||
for _, ch := range ka.chs {
|
||||
close(ch)
|
||||
}
|
||||
}
|
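For orientation, here is a minimal sketch of how the keepalive API above is typically driven from application code. The endpoint address and the 10-second TTL are illustrative assumptions, not values taken from this commit.

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Grant a lease, then let the lessor refresh it in the background.
	grant, err := cli.Grant(context.Background(), 10)
	if err != nil {
		panic(err)
	}
	ch, err := cli.KeepAlive(context.Background(), grant.ID)
	if err != nil {
		panic(err)
	}
	// Each response is one successful refresh; the channel closes when the
	// lease expires, is revoked, or the keepalive context is canceled.
	for resp := range ch {
		fmt.Printf("lease %x refreshed, TTL=%d\n", resp.ID, resp.TTL)
	}
}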
82 vendor/github.com/coreos/etcd/clientv3.old/logger.go generated vendored Normal file
@@ -0,0 +1,82 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"io/ioutil"
	"log"
	"sync"

	"google.golang.org/grpc/grpclog"
)

// Logger is the logger used by client library.
// It implements grpclog.Logger interface.
type Logger grpclog.Logger

var (
	logger settableLogger
)

type settableLogger struct {
	l  grpclog.Logger
	mu sync.RWMutex
}

func init() {
	// disable client side logs by default
	logger.mu.Lock()
	logger.l = log.New(ioutil.Discard, "", 0)

	// logger has to override the grpclog at initialization so that
	// any changes to the grpclog go through logger with locking
	// instead of through SetLogger
	//
	// now updates only happen through settableLogger.set
	grpclog.SetLogger(&logger)
	logger.mu.Unlock()
}

// SetLogger sets client-side Logger. By default, logs are disabled.
func SetLogger(l Logger) {
	logger.set(l)
}

// GetLogger returns the current logger.
func GetLogger() Logger {
	return logger.get()
}

func (s *settableLogger) set(l Logger) {
	s.mu.Lock()
	logger.l = l
	s.mu.Unlock()
}

func (s *settableLogger) get() Logger {
	s.mu.RLock()
	l := logger.l
	s.mu.RUnlock()
	return l
}

// implement the grpclog.Logger interface

func (s *settableLogger) Fatal(args ...interface{})                 { s.get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{})               { s.get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{})                 { s.get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{})               { s.get().Println(args...) }
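A short usage sketch for the settable logger: client-side logs are discarded by default, so an application that wants them must install a sink. Any value satisfying grpclog.Logger works; the standard library *log.Logger used here is one such implementation.

package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Route client-side etcd/gRPC logs to stderr instead of ioutil.Discard.
	clientv3.SetLogger(log.New(os.Stderr, "etcd-client ", log.LstdFlags))
}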
73 vendor/github.com/coreos/etcd/clientv3.old/main_test.go generated vendored Normal file
@@ -0,0 +1,73 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3_test

import (
	"fmt"
	"os"
	"regexp"
	"strings"
	"testing"
	"time"

	"github.com/coreos/etcd/auth"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
	"golang.org/x/crypto/bcrypt"
)

func init() { auth.BcryptCost = bcrypt.MinCost }

// TestMain sets up an etcd cluster if running the examples.
func TestMain(m *testing.M) {
	useCluster, hasRunArg := false, false // default to running only Test*
	for _, arg := range os.Args {
		if strings.HasPrefix(arg, "-test.run=") {
			exp := strings.Split(arg, "=")[1]
			match, err := regexp.MatchString(exp, "Example")
			useCluster = (err == nil && match) || strings.Contains(exp, "Example")
			hasRunArg = true
			break
		}
	}
	if !hasRunArg {
		// force only running Test* if no args given to avoid leak false
		// positives from having a long-running cluster for the examples.
		os.Args = append(os.Args, "-test.run=Test")
	}

	v := 0
	if useCluster {
		cfg := integration.ClusterConfig{Size: 3}
		clus := integration.NewClusterV3(nil, &cfg)
		endpoints = make([]string, 3)
		for i := range endpoints {
			endpoints[i] = clus.Client(i).Endpoints()[0]
		}
		v = m.Run()
		clus.Terminate(nil)
		if err := testutil.CheckAfterTest(time.Second); err != nil {
			fmt.Fprintf(os.Stderr, "%v", err)
			os.Exit(1)
		}
	} else {
		v = m.Run()
	}

	if v == 0 && testutil.CheckLeakedGoroutine() {
		os.Exit(1)
	}
	os.Exit(v)
}
192 vendor/github.com/coreos/etcd/clientv3.old/maintenance.go generated vendored Normal file
@@ -0,0 +1,192 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"io"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

type (
	DefragmentResponse pb.DefragmentResponse
	AlarmResponse      pb.AlarmResponse
	AlarmMember        pb.AlarmMember
	StatusResponse     pb.StatusResponse
	MoveLeaderResponse pb.MoveLeaderResponse
)

type Maintenance interface {
	// AlarmList gets all active alarms.
	AlarmList(ctx context.Context) (*AlarmResponse, error)

	// AlarmDisarm disarms a given alarm.
	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)

	// Defragment defragments the storage backend of the etcd member with the given endpoint.
	// Defragment is only needed after deleting a large number of keys, to reclaim
	// the resources.
	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
	// at the same time.
	// To defragment multiple members in the cluster, call Defragment multiple
	// times with different endpoints.
	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)

	// Status gets the status of the endpoint.
	Status(ctx context.Context, endpoint string) (*StatusResponse, error)

	// Snapshot provides a reader for a snapshot of a backend.
	Snapshot(ctx context.Context) (io.ReadCloser, error)

	// MoveLeader requests the current leader to transfer its leadership to the transferee.
	// Request must be made to the leader.
	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
}

type maintenance struct {
	dial   func(endpoint string) (pb.MaintenanceClient, func(), error)
	remote pb.MaintenanceClient
}

func NewMaintenance(c *Client) Maintenance {
	return &maintenance{
		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
			conn, err := c.dial(endpoint)
			if err != nil {
				return nil, nil, err
			}
			cancel := func() { conn.Close() }
			return pb.NewMaintenanceClient(conn), cancel, nil
		},
		remote: pb.NewMaintenanceClient(c.conn),
	}
}

func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient) Maintenance {
	return &maintenance{
		dial: func(string) (pb.MaintenanceClient, func(), error) {
			return remote, func() {}, nil
		},
		remote: remote,
	}
}

func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
	req := &pb.AlarmRequest{
		Action:   pb.AlarmRequest_GET,
		MemberID: 0,                 // all
		Alarm:    pb.AlarmType_NONE, // all
	}
	for {
		resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
		if err == nil {
			return (*AlarmResponse)(resp), nil
		}
		if isHaltErr(ctx, err) {
			return nil, toErr(ctx, err)
		}
	}
}

func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
	req := &pb.AlarmRequest{
		Action:   pb.AlarmRequest_DEACTIVATE,
		MemberID: am.MemberID,
		Alarm:    am.Alarm,
	}

	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
		ar, err := m.AlarmList(ctx)
		if err != nil {
			return nil, toErr(ctx, err)
		}
		ret := AlarmResponse{}
		for _, am := range ar.Alarms {
			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
			if derr != nil {
				return nil, toErr(ctx, derr)
			}
			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
		}
		return &ret, nil
	}

	resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
	if err == nil {
		return (*AlarmResponse)(resp), nil
	}
	return nil, toErr(ctx, err)
}

func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
	remote, cancel, err := m.dial(endpoint)
	if err != nil {
		return nil, toErr(ctx, err)
	}
	defer cancel()
	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*DefragmentResponse)(resp), nil
}

func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
	remote, cancel, err := m.dial(endpoint)
	if err != nil {
		return nil, toErr(ctx, err)
	}
	defer cancel()
	resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
	if err != nil {
		return nil, toErr(ctx, err)
	}
	return (*StatusResponse)(resp), nil
}

func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
	if err != nil {
		return nil, toErr(ctx, err)
	}

	pr, pw := io.Pipe()
	go func() {
		for {
			resp, err := ss.Recv()
			if err != nil {
				pw.CloseWithError(err)
				return
			}
			if resp == nil && err == nil {
				break
			}
			if _, werr := pw.Write(resp.Blob); werr != nil {
				pw.CloseWithError(werr)
				return
			}
		}
		pw.Close()
	}()
	return pr, nil
}

func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
	resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, grpc.FailFast(false))
	return (*MoveLeaderResponse)(resp), toErr(ctx, err)
}
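To make the Maintenance interface concrete, here is a hedged sketch that checks each member's status and defragments members one at a time, as the Defragment comment advises. The endpoint list comes from the client configuration; "localhost:2379" is an assumed local cluster, not part of this commit.

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Defragment one member at a time; doing them in parallel is discouraged.
	for _, ep := range cli.Endpoints() {
		status, err := cli.Status(context.Background(), ep)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: dbSize=%d leader=%x\n", ep, status.DbSize, status.Leader)
		if _, err := cli.Defragment(context.Background(), ep); err != nil {
			panic(err)
		}
	}
}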
110 vendor/github.com/coreos/etcd/clientv3.old/mirror/syncer.go generated vendored Normal file
@@ -0,0 +1,110 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package mirror implements etcd mirroring operations.
package mirror

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

const (
	batchLimit = 1000
)

// Syncer syncs with the key-value state of an etcd cluster.
type Syncer interface {
	// SyncBase syncs the base state of the key-value state.
	// The key-value state is sent through the returned chan.
	SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error)
	// SyncUpdates syncs the updates of the key-value state.
	// The update events are sent through the returned chan.
	SyncUpdates(ctx context.Context) clientv3.WatchChan
}

// NewSyncer creates a Syncer.
func NewSyncer(c *clientv3.Client, prefix string, rev int64) Syncer {
	return &syncer{c: c, prefix: prefix, rev: rev}
}

type syncer struct {
	c      *clientv3.Client
	rev    int64
	prefix string
}

func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, chan error) {
	respchan := make(chan clientv3.GetResponse, 1024)
	errchan := make(chan error, 1)

	// if rev is not specified, we will choose the most recent revision.
	if s.rev == 0 {
		resp, err := s.c.Get(ctx, "foo")
		if err != nil {
			errchan <- err
			close(respchan)
			close(errchan)
			return respchan, errchan
		}
		s.rev = resp.Header.Revision
	}

	go func() {
		defer close(respchan)
		defer close(errchan)

		var key string

		opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)}

		if len(s.prefix) == 0 {
			// If len(s.prefix) == 0, we will sync the entire key-value space.
			// We then range from the smallest key (0x00) to the end.
			opts = append(opts, clientv3.WithFromKey())
			key = "\x00"
		} else {
			// If len(s.prefix) != 0, we will sync the key-value space with the given prefix.
			// We then range from the prefix to the next prefix if it exists, or to the
			// end of the keyspace if the next prefix does not exist.
			opts = append(opts, clientv3.WithRange(clientv3.GetPrefixRangeEnd(s.prefix)))
			key = s.prefix
		}

		for {
			resp, err := s.c.Get(ctx, key, opts...)
			if err != nil {
				errchan <- err
				return
			}

			respchan <- (clientv3.GetResponse)(*resp)

			if !resp.More {
				return
			}
			// move to next key
			key = string(append(resp.Kvs[len(resp.Kvs)-1].Key, 0))
		}
	}()

	return respchan, errchan
}

func (s *syncer) SyncUpdates(ctx context.Context) clientv3.WatchChan {
	if s.rev == 0 {
		panic("unexpected revision = 0. Calling SyncUpdates before SyncBase finishes?")
	}
	return s.c.Watch(ctx, s.prefix, clientv3.WithPrefix(), clientv3.WithRev(s.rev+1))
}
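A small sketch of the intended two-phase use of Syncer: replay the base state, then follow live updates from the revision where the base sync stopped. The "jobs/" prefix and the endpoint are hypothetical values for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/mirror"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	s := mirror.NewSyncer(cli, "jobs/", 0) // rev 0: start from the current revision

	// Phase 1: replay the existing state under the prefix.
	getc, errc := s.SyncBase(context.Background())
	for resp := range getc {
		for _, kv := range resp.Kvs {
			fmt.Printf("base: %s=%s\n", kv.Key, kv.Value)
		}
	}
	if err := <-errc; err != nil {
		panic(err)
	}

	// Phase 2: follow updates from where SyncBase left off.
	for wresp := range s.SyncUpdates(context.Background()) {
		for _, ev := range wresp.Events {
			fmt.Printf("update: %s %s\n", ev.Type, ev.Kv.Key)
		}
	}
}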
43 vendor/github.com/coreos/etcd/clientv3.old/namespace/doc.go generated vendored Normal file
@@ -0,0 +1,43 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package namespace is a clientv3 wrapper that translates all keys to begin
// with a given prefix.
//
// First, create a client:
//
//	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
//	if err != nil {
//		// handle error!
//	}
//
// Next, override the client interfaces:
//
//	unprefixedKV := cli.KV
//	cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
//	cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
//	cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
//
// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/":
//
//	cli.Put(context.TODO(), "abc", "123")
//	resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc")
//	fmt.Printf("%s\n", resp.Kvs[0].Value)
//	// Output: 123
//	unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456")
//	resp, _ = cli.Get(context.TODO(), "abc")
//	fmt.Printf("%s\n", resp.Kvs[0].Value)
//	// Output: 456
//
package namespace
206 vendor/github.com/coreos/etcd/clientv3.old/namespace/kv.go generated vendored Normal file
@@ -0,0 +1,206 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package namespace

import (
	"golang.org/x/net/context"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

type kvPrefix struct {
	clientv3.KV
	pfx string
}

// NewKV wraps a KV instance so that all requests
// are prefixed with a given string.
func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
	return &kvPrefix{kv, prefix}
}

func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
	if len(key) == 0 {
		return nil, rpctypes.ErrEmptyKey
	}
	op := kv.prefixOp(clientv3.OpPut(key, val, opts...))
	r, err := kv.KV.Do(ctx, op)
	if err != nil {
		return nil, err
	}
	put := r.Put()
	kv.unprefixPutResponse(put)
	return put, nil
}

func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
	if len(key) == 0 {
		return nil, rpctypes.ErrEmptyKey
	}
	r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...)))
	if err != nil {
		return nil, err
	}
	get := r.Get()
	kv.unprefixGetResponse(get)
	return get, nil
}

func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
	if len(key) == 0 {
		return nil, rpctypes.ErrEmptyKey
	}
	r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...)))
	if err != nil {
		return nil, err
	}
	del := r.Del()
	kv.unprefixDeleteResponse(del)
	return del, nil
}

func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
	if len(op.KeyBytes()) == 0 && !op.IsTxn() {
		return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
	}
	r, err := kv.KV.Do(ctx, kv.prefixOp(op))
	if err != nil {
		return r, err
	}
	switch {
	case r.Get() != nil:
		kv.unprefixGetResponse(r.Get())
	case r.Put() != nil:
		kv.unprefixPutResponse(r.Put())
	case r.Del() != nil:
		kv.unprefixDeleteResponse(r.Del())
	case r.Txn() != nil:
		kv.unprefixTxnResponse(r.Txn())
	}
	return r, nil
}

type txnPrefix struct {
	clientv3.Txn
	kv *kvPrefix
}

func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
	return &txnPrefix{kv.KV.Txn(ctx), kv}
}

func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
	txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...)
	return txn
}

func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
	txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...)
	return txn
}

func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
	txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...)
	return txn
}

func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
	resp, err := txn.Txn.Commit()
	if err != nil {
		return nil, err
	}
	txn.kv.unprefixTxnResponse(resp)
	return resp, nil
}

func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
	if !op.IsTxn() {
		begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
		op.WithKeyBytes(begin)
		op.WithRangeBytes(end)
		return op
	}
	cmps, thenOps, elseOps := op.Txn()
	return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps))
}

func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
	for i := range resp.Kvs {
		resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):]
	}
}

func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) {
	if resp.PrevKv != nil {
		resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):]
	}
}

func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) {
	for i := range resp.PrevKvs {
		resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):]
	}
}

func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
	for _, r := range resp.Responses {
		switch tv := r.Response.(type) {
		case *pb.ResponseOp_ResponseRange:
			if tv.ResponseRange != nil {
				kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange))
			}
		case *pb.ResponseOp_ResponsePut:
			if tv.ResponsePut != nil {
				kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut))
			}
		case *pb.ResponseOp_ResponseDeleteRange:
			if tv.ResponseDeleteRange != nil {
				kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
			}
		case *pb.ResponseOp_ResponseTxn:
			if tv.ResponseTxn != nil {
				kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn))
			}
		default:
		}
	}
}

func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
	return prefixInterval(p.pfx, key, end)
}

func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp {
	newCmps := make([]clientv3.Cmp, len(cs))
	for i := range cs {
		newCmps[i] = cs[i]
		pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), cs[i].RangeEnd)
		newCmps[i].WithKeyBytes(pfxKey)
		if len(cs[i].RangeEnd) != 0 {
			newCmps[i].RangeEnd = endKey
		}
	}
	return newCmps
}

func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op {
	newOps := make([]clientv3.Op, len(ops))
	for i := range ops {
		newOps[i] = kv.prefixOp(ops[i])
	}
	return newOps
}
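To make the txnPrefix wrapper above concrete, here is a hedged sketch of a compare-and-put issued through a namespaced KV; the "app1/" prefix and the key names are hypothetical. The comparison and both branches are rewritten against the prefixed key on the way out, and keys in the response are stripped on the way back.

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	kv := namespace.NewKV(cli.KV, "app1/")

	// The comparison and both ops are transparently rewritten against "app1/job".
	resp, err := kv.Txn(context.Background()).
		If(clientv3.Compare(clientv3.CreateRevision("job"), "=", 0)).
		Then(clientv3.OpPut("job", "pending")).
		Else(clientv3.OpGet("job")).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", resp.Succeeded)
}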
58 vendor/github.com/coreos/etcd/clientv3.old/namespace/lease.go generated vendored Normal file
@@ -0,0 +1,58 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package namespace

import (
	"bytes"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/clientv3"
)

type leasePrefix struct {
	clientv3.Lease
	pfx []byte
}

// NewLease wraps a Lease interface to filter for only keys with a prefix
// and remove that prefix when fetching attached keys through TimeToLive.
func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
	return &leasePrefix{l, []byte(prefix)}
}

func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
	resp, err := l.Lease.TimeToLive(ctx, id, opts...)
	if err != nil {
		return nil, err
	}
	if len(resp.Keys) > 0 {
		var outKeys [][]byte
		for i := range resp.Keys {
			if len(resp.Keys[i]) < len(l.pfx) {
				// too short
				continue
			}
			if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
				// doesn't match prefix
				continue
			}
			// strip prefix
			outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
		}
		resp.Keys = outKeys
	}
	return resp, nil
}
42 vendor/github.com/coreos/etcd/clientv3.old/namespace/util.go generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package namespace

func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) {
	pfxKey = make([]byte, len(pfx)+len(key))
	copy(pfxKey[copy(pfxKey, pfx):], key)

	if len(end) == 1 && end[0] == 0 {
		// the edge of the keyspace
		pfxEnd = make([]byte, len(pfx))
		copy(pfxEnd, pfx)
		ok := false
		for i := len(pfxEnd) - 1; i >= 0; i-- {
			if pfxEnd[i]++; pfxEnd[i] != 0 {
				ok = true
				break
			}
		}
		if !ok {
			// 0xff..ff => 0x00
			pfxEnd = []byte{0}
		}
	} else if len(end) >= 1 {
		pfxEnd = make([]byte, len(pfx)+len(end))
		copy(pfxEnd[copy(pfxEnd, pfx):], end)
	}

	return pfxKey, pfxEnd
}
75 vendor/github.com/coreos/etcd/clientv3.old/namespace/util_test.go generated vendored Normal file
@@ -0,0 +1,75 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package namespace

import (
	"bytes"
	"testing"
)

func TestPrefixInterval(t *testing.T) {
	tests := []struct {
		pfx string
		key []byte
		end []byte

		wKey []byte
		wEnd []byte
	}{
		// single key
		{
			pfx: "pfx/",
			key: []byte("a"),

			wKey: []byte("pfx/a"),
		},
		// range
		{
			pfx: "pfx/",
			key: []byte("abc"),
			end: []byte("def"),

			wKey: []byte("pfx/abc"),
			wEnd: []byte("pfx/def"),
		},
		// one-sided range
		{
			pfx: "pfx/",
			key: []byte("abc"),
			end: []byte{0},

			wKey: []byte("pfx/abc"),
			wEnd: []byte("pfx0"),
		},
		// one-sided range, end of keyspace
		{
			pfx: "\xff\xff",
			key: []byte("abc"),
			end: []byte{0},

			wKey: []byte("\xff\xffabc"),
			wEnd: []byte{0},
		},
	}
	for i, tt := range tests {
		pfxKey, pfxEnd := prefixInterval(tt.pfx, tt.key, tt.end)
		if !bytes.Equal(pfxKey, tt.wKey) {
			t.Errorf("#%d: expected key=%q, got key=%q", i, tt.wKey, pfxKey)
		}
		if !bytes.Equal(pfxEnd, tt.wEnd) {
			t.Errorf("#%d: expected end=%q, got end=%q", i, tt.wEnd, pfxEnd)
		}
	}
}
84 vendor/github.com/coreos/etcd/clientv3.old/namespace/watch.go generated vendored Normal file
@@ -0,0 +1,84 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package namespace

import (
	"sync"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/clientv3"
)

type watcherPrefix struct {
	clientv3.Watcher
	pfx string

	wg       sync.WaitGroup
	stopc    chan struct{}
	stopOnce sync.Once
}

// NewWatcher wraps a Watcher instance so that all Watch requests
// are prefixed with a given string and all Watch responses have
// the prefix removed.
func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
	return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
}

func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
	// since OpOption is opaque, determine range for prefixing through an OpGet
	op := clientv3.OpGet(key, opts...)
	end := op.RangeBytes()
	pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end)
	if pfxEnd != nil {
		opts = append(opts, clientv3.WithRange(string(pfxEnd)))
	}

	wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...)

	// translate watch events from prefixed to unprefixed
	pfxWch := make(chan clientv3.WatchResponse)
	w.wg.Add(1)
	go func() {
		defer func() {
			close(pfxWch)
			w.wg.Done()
		}()
		for wr := range wch {
			for i := range wr.Events {
				wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):]
				if wr.Events[i].PrevKv != nil {
					wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key
				}
			}
			select {
			case pfxWch <- wr:
			case <-ctx.Done():
				return
			case <-w.stopc:
				return
			}
		}
	}()
	return pfxWch
}

func (w *watcherPrefix) Close() error {
	err := w.Watcher.Close()
	w.stopOnce.Do(func() { close(w.stopc) })
	w.wg.Wait()
	return err
}
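A brief sketch of the prefixed watcher above: the watch is registered under the full prefixed range, while the delivered events carry the prefix-stripped keys. The "app1/" and "jobs/" names and the endpoint are hypothetical.

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // assumed endpoint
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	w := namespace.NewWatcher(cli.Watcher, "app1/")

	// Watches "app1/jobs/..." on the wire; delivered keys have "app1/" stripped.
	for wresp := range w.Watch(context.Background(), "jobs/", clientv3.WithPrefix()) {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q\n", ev.Type, ev.Kv.Key)
		}
	}
}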
56 vendor/github.com/coreos/etcd/clientv3.old/naming/doc.go generated vendored Normal file
@@ -0,0 +1,56 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services.
//
// To use, first import the packages:
//
//	import (
//		"github.com/coreos/etcd/clientv3"
//		etcdnaming "github.com/coreos/etcd/clientv3/naming"
//
//		"google.golang.org/grpc"
//		"google.golang.org/grpc/naming"
//	)
//
// First, register new endpoint addresses for a service:
//
//	func etcdAdd(c *clientv3.Client, service, addr string) error {
//		r := &etcdnaming.GRPCResolver{Client: c}
//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr})
//	}
//
// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
//
//	func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
//		r := &etcdnaming.GRPCResolver{Client: c}
//		b := grpc.RoundRobin(r)
//		return grpc.Dial(service, grpc.WithBalancer(b))
//	}
//
// Optionally, force delete an endpoint:
//
//	func etcdDelete(c *clientv3.Client, service, addr string) error {
//		r := &etcdnaming.GRPCResolver{Client: c}
//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Delete, Addr: addr})
//	}
//
// Or register an expiring endpoint with a lease:
//
//	func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
//		r := &etcdnaming.GRPCResolver{Client: c}
//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
//	}
//
package naming
131 vendor/github.com/coreos/etcd/clientv3.old/naming/grpc.go generated vendored Normal file
@@ -0,0 +1,131 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package naming

import (
	"encoding/json"
	"fmt"

	etcd "github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/naming"
)

var ErrWatcherClosed = fmt.Errorf("naming: watch closed")

// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.
type GRPCResolver struct {
	// Client is an initialized etcd client.
	Client *etcd.Client
}

func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {
	switch nm.Op {
	case naming.Add:
		var v []byte
		if v, err = json.Marshal(nm); err != nil {
			return grpc.Errorf(codes.InvalidArgument, err.Error())
		}
		_, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
	case naming.Delete:
		_, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
	default:
		return grpc.Errorf(codes.InvalidArgument, "naming: bad naming op")
	}
	return err
}

func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {
	ctx, cancel := context.WithCancel(context.Background())
	w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel}
	return w, nil
}

type gRPCWatcher struct {
	c      *etcd.Client
	target string
	ctx    context.Context
	cancel context.CancelFunc
	wch    etcd.WatchChan
	err    error
}

// Next gets the next set of updates from the etcd resolver.
// Calls to Next should be serialized; concurrent calls are not safe since
// there is no way to reconcile the update ordering.
func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
	if gw.wch == nil {
		// first Next() returns all addresses
		return gw.firstNext()
	}
	if gw.err != nil {
		return nil, gw.err
	}

	// process new events on target/*
	wr, ok := <-gw.wch
	if !ok {
		gw.err = grpc.Errorf(codes.Unavailable, "%s", ErrWatcherClosed)
		return nil, gw.err
	}
	if gw.err = wr.Err(); gw.err != nil {
		return nil, gw.err
	}

	updates := make([]*naming.Update, 0, len(wr.Events))
	for _, e := range wr.Events {
		var jupdate naming.Update
		var err error
		switch e.Type {
		case etcd.EventTypePut:
			err = json.Unmarshal(e.Kv.Value, &jupdate)
			jupdate.Op = naming.Add
		case etcd.EventTypeDelete:
			err = json.Unmarshal(e.PrevKv.Value, &jupdate)
			jupdate.Op = naming.Delete
		}
		if err == nil {
			updates = append(updates, &jupdate)
		}
	}
	return updates, nil
}

func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) {
	// Use serialized request so resolution still works if the target etcd
	// server is partitioned away from the quorum.
	resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable())
	if gw.err = err; err != nil {
		return nil, err
	}

	updates := make([]*naming.Update, 0, len(resp.Kvs))
	for _, kv := range resp.Kvs {
		var jupdate naming.Update
		if err := json.Unmarshal(kv.Value, &jupdate); err != nil {
			continue
		}
		updates = append(updates, &jupdate)
	}

	opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()}
	gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...)
	return updates, nil
}

func (gw *gRPCWatcher) Close() { gw.cancel() }
135 vendor/github.com/coreos/etcd/clientv3.old/naming/grpc_test.go generated vendored Normal file
@@ -0,0 +1,135 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package naming

import (
	"encoding/json"
	"reflect"
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/grpc/naming"

	etcd "github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"
)

func TestGRPCResolver(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	r := GRPCResolver{
		Client: clus.RandClient(),
	}

	w, err := r.Resolve("foo")
	if err != nil {
		t.Fatal("failed to resolve foo", err)
	}
	defer w.Close()

	addOp := naming.Update{Op: naming.Add, Addr: "127.0.0.1", Metadata: "metadata"}
	err = r.Update(context.TODO(), "foo", addOp)
	if err != nil {
		t.Fatal("failed to add foo", err)
	}

	us, err := w.Next()
	if err != nil {
		t.Fatal("failed to get update", err)
	}

	wu := &naming.Update{
		Op:       naming.Add,
		Addr:     "127.0.0.1",
		Metadata: "metadata",
	}

	if !reflect.DeepEqual(us[0], wu) {
		t.Fatalf("up = %#v, want %#v", us[0], wu)
	}

	delOp := naming.Update{Op: naming.Delete, Addr: "127.0.0.1"}
	err = r.Update(context.TODO(), "foo", delOp)
	if err != nil {
		t.Fatal("failed to delete foo", err)
	}

	us, err = w.Next()
	if err != nil {
		t.Fatal("failed to get update", err)
	}

	wu = &naming.Update{
		Op:       naming.Delete,
		Addr:     "127.0.0.1",
		Metadata: "metadata",
	}

	if !reflect.DeepEqual(us[0], wu) {
		t.Fatalf("up = %#v, want %#v", us[0], wu)
	}
}

// TestGRPCResolverMulti ensures the resolver will initialize
// correctly with multiple hosts and correctly receive multiple
// updates in a single revision.
func TestGRPCResolverMulti(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)
	c := clus.RandClient()

	v, verr := json.Marshal(naming.Update{Addr: "127.0.0.1", Metadata: "md"})
	if verr != nil {
		t.Fatal(verr)
	}
	if _, err := c.Put(context.TODO(), "foo/host", string(v)); err != nil {
		t.Fatal(err)
	}
	if _, err := c.Put(context.TODO(), "foo/host2", string(v)); err != nil {
		t.Fatal(err)
	}

	r := GRPCResolver{c}

	w, err := r.Resolve("foo")
	if err != nil {
		t.Fatal("failed to resolve foo", err)
	}
	defer w.Close()

	updates, nerr := w.Next()
	if nerr != nil {
		t.Fatal(nerr)
	}
	if len(updates) != 2 {
		t.Fatalf("expected two updates, got %+v", updates)
	}

	_, err = c.Txn(context.TODO()).Then(etcd.OpDelete("foo/host"), etcd.OpDelete("foo/host2")).Commit()
	if err != nil {
		t.Fatal(err)
	}

	updates, nerr = w.Next()
	if nerr != nil {
		t.Fatal(nerr)
	}
	if len(updates) != 2 || (updates[0].Op != naming.Delete && updates[1].Op != naming.Delete) {
		t.Fatalf("expected two updates, got %+v", updates)
	}
}
481 vendor/github.com/coreos/etcd/clientv3.old/op.go generated vendored Normal file
@@ -0,0 +1,481 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

type opType int

const (
	// A default Op has opType 0, which is invalid.
	tRange opType = iota + 1
	tPut
	tDeleteRange
	tTxn
)

var (
	noPrefixEnd = []byte{0}
)

// Op represents an Operation that kv can execute.
type Op struct {
	t   opType
	key []byte
	end []byte

	// for range
	limit        int64
	sort         *SortOption
	serializable bool
	keysOnly     bool
	countOnly    bool
	minModRev    int64
	maxModRev    int64
	minCreateRev int64
	maxCreateRev int64

	// for range, watch
	rev int64

	// for watch, put, delete
	prevKV bool

	// for put
	ignoreValue bool
	ignoreLease bool

	// progressNotify is for progress updates.
	progressNotify bool
	// createdNotify is for created event
	createdNotify bool
	// filters for watchers
	filterPut    bool
	filterDelete bool

	// for put
	val     []byte
	leaseID LeaseID

	// txn
	cmps    []Cmp
	thenOps []Op
	elseOps []Op
}

// accessors / mutators

func (op Op) IsTxn() bool              { return op.t == tTxn }
func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }

// KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key }

// WithKeyBytes sets the byte slice for the Op's key.
func (op *Op) WithKeyBytes(key []byte) { op.key = key }

// RangeBytes returns the byte slice holding the Op's range end, if any.
func (op Op) RangeBytes() []byte { return op.end }

// WithRangeBytes sets the byte slice for the Op's range end.
func (op *Op) WithRangeBytes(end []byte) { op.end = end }

// ValueBytes returns the byte slice holding the Op's value, if any.
func (op Op) ValueBytes() []byte { return op.val }

// WithValueBytes sets the byte slice for the Op's value.
func (op *Op) WithValueBytes(v []byte) { op.val = v }

func (op Op) toRangeRequest() *pb.RangeRequest {
	if op.t != tRange {
		panic("op.t != tRange")
	}
	r := &pb.RangeRequest{
		Key:               op.key,
		RangeEnd:          op.end,
		Limit:             op.limit,
		Revision:          op.rev,
		Serializable:      op.serializable,
		KeysOnly:          op.keysOnly,
		CountOnly:         op.countOnly,
		MinModRevision:    op.minModRev,
		MaxModRevision:    op.maxModRev,
		MinCreateRevision: op.minCreateRev,
		MaxCreateRevision: op.maxCreateRev,
	}
	if op.sort != nil {
		r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
		r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
	}
	return r
}

func (op Op) toTxnRequest() *pb.TxnRequest {
	thenOps := make([]*pb.RequestOp, len(op.thenOps))
	for i, tOp := range op.thenOps {
		thenOps[i] = tOp.toRequestOp()
	}
	elseOps := make([]*pb.RequestOp, len(op.elseOps))
	for i, eOp := range op.elseOps {
		elseOps[i] = eOp.toRequestOp()
	}
	cmps := make([]*pb.Compare, len(op.cmps))
	for i := range op.cmps {
		cmps[i] = (*pb.Compare)(&op.cmps[i])
	}
	return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
}

func (op Op) toRequestOp() *pb.RequestOp {
	switch op.t {
	case tRange:
		return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
	case tPut:
		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
		return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
	case tDeleteRange:
		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
		return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
	case tTxn:
		return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
	default:
		panic("Unknown Op")
	}
}

func (op Op) isWrite() bool {
	if op.t == tTxn {
		for _, tOp := range op.thenOps {
			if tOp.isWrite() {
				return true
			}
		}
		for _, tOp := range op.elseOps {
			if tOp.isWrite() {
				return true
			}
		}
		return false
	}
	return op.t != tRange
}

func OpGet(key string, opts ...OpOption) Op {
	ret := Op{t: tRange, key: []byte(key)}
	ret.applyOpts(opts)
	return ret
}

func OpDelete(key string, opts ...OpOption) Op {
	ret := Op{t: tDeleteRange, key: []byte(key)}
	ret.applyOpts(opts)
	switch {
	case ret.leaseID != 0:
		panic("unexpected lease in delete")
	case ret.limit != 0:
		panic("unexpected limit in delete")
	case ret.rev != 0:
		panic("unexpected revision in delete")
	case ret.sort != nil:
		panic("unexpected sort in delete")
	case ret.serializable:
		panic("unexpected serializable in delete")
	case ret.countOnly:
		panic("unexpected countOnly in delete")
	case ret.minModRev != 0, ret.maxModRev != 0:
		panic("unexpected mod revision filter in delete")
	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
		panic("unexpected create revision filter in delete")
	case ret.filterDelete, ret.filterPut:
		panic("unexpected filter in delete")
	case ret.createdNotify:
		panic("unexpected createdNotify in delete")
	}
	return ret
}

func OpPut(key, val string, opts ...OpOption) Op {
	ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
	ret.applyOpts(opts)
	switch {
	case ret.end != nil:
		panic("unexpected range in put")
	case ret.limit != 0:
		panic("unexpected limit in put")
	case ret.rev != 0:
		panic("unexpected revision in put")
	case ret.sort != nil:
		panic("unexpected sort in put")
	case ret.serializable:
		panic("unexpected serializable in put")
	case ret.countOnly:
		panic("unexpected countOnly in put")
	case ret.minModRev != 0, ret.maxModRev != 0:
		panic("unexpected mod revision filter in put")
	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
		panic("unexpected create revision filter in put")
	case ret.filterDelete, ret.filterPut:
		panic("unexpected filter in put")
	case ret.createdNotify:
		panic("unexpected createdNotify in put")
	}
	return ret
}

func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
	return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}

func opWatch(key string, opts ...OpOption) Op {
	ret := Op{t: tRange, key: []byte(key)}
	ret.applyOpts(opts)
	switch {
	case ret.leaseID != 0:
		panic("unexpected lease in watch")
	case ret.limit != 0:
		panic("unexpected limit in watch")
	case ret.sort != nil:
		panic("unexpected sort in watch")
	case ret.serializable:
		panic("unexpected serializable in watch")
	case ret.countOnly:
		panic("unexpected countOnly in watch")
	case ret.minModRev != 0, ret.maxModRev != 0:
		panic("unexpected mod revision filter in watch")
	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
		panic("unexpected create revision filter in watch")
	}
	return ret
}

func (op *Op) applyOpts(opts []OpOption) {
	for _, opt := range opts {
		opt(op)
	}
}

// OpOption configures Operations like Get, Put, Delete.
type OpOption func(*Op)

// WithLease attaches a lease ID to a key in 'Put' request.
func WithLease(leaseID LeaseID) OpOption {
	return func(op *Op) { op.leaseID = leaseID }
}

// WithLimit limits the number of results to return from 'Get' request.
// If WithLimit is given a 0 limit, it is treated as no limit.
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }

// WithRev specifies the store revision for 'Get' request.
// Or the start revision of 'Watch' request.
func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }

// WithSort specifies the ordering in 'Get' request. It requires
// 'WithRange' and/or 'WithPrefix' to be specified too.
// 'target' specifies the target to sort by: key, version, revisions, value.
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
func WithSort(target SortTarget, order SortOrder) OpOption {
	return func(op *Op) {
		if target == SortByKey && order == SortAscend {
			// If order != SortNone, server fetches the entire key-space,
			// and then applies the sort and limit, if provided.
			// Since current mvcc.Range implementation returns results
			// sorted by keys in lexicographically ascending order,
			// client should ignore SortOrder if the target is SortByKey.
			order = SortNone
		}
		op.sort = &SortOption{target, order}
	}
}

// GetPrefixRangeEnd gets the range end of the prefix.
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
func GetPrefixRangeEnd(prefix string) string {
	return string(getPrefix([]byte(prefix)))
}

func getPrefix(key []byte) []byte {
	end := make([]byte, len(key))
	copy(end, key)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i] = end[i] + 1
			end = end[:i+1]
			return end
		}
	}
	// next prefix does not exist (e.g., 0xffff);
	// default to WithFromKey policy
	return noPrefixEnd
}

// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
	return func(op *Op) {
		if len(op.key) == 0 {
			op.key, op.end = []byte{0}, []byte{0}
			return
		}
		op.end = getPrefix(op.key)
	}
}

// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
// For example, 'Get' requests with 'WithRange(end)' returns
// the keys in the range [key, end).
// endKey must be lexicographically greater than start key.
func WithRange(endKey string) OpOption {
	return func(op *Op) { op.end = []byte(endKey) }
}

// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
// to be equal or greater than the key in the argument.
func WithFromKey() OpOption { return WithRange("\x00") }

// WithSerializable makes 'Get' request serializable. By default,
// it's linearizable. Serializable requests are better for lower latency
// requirement.
func WithSerializable() OpOption {
	return func(op *Op) { op.serializable = true }
}

// WithKeysOnly makes the 'Get' request return only the keys and the corresponding
// values will be omitted.
func WithKeysOnly() OpOption {
	return func(op *Op) { op.keysOnly = true }
}

// WithCountOnly makes the 'Get' request return only the count of keys.
func WithCountOnly() OpOption {
	return func(op *Op) { op.countOnly = true }
}

// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }

// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }

// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }

// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }

// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }

// WithLastCreate gets the key with the latest creation revision in the request range.
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }

// WithFirstKey gets the lexically first key in the request range.
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }

// WithLastKey gets the lexically last key in the request range.
func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }

// WithFirstRev gets the key with the oldest modification revision in the request range.
func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }

// WithLastRev gets the key with the latest modification revision in the request range.
func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }

// withTop gets the first key over the get's prefix given a sort order
func withTop(target SortTarget, order SortOrder) []OpOption {
	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}

// WithProgressNotify makes watch server send periodic progress updates
// every 10 minutes when there are no incoming events.
// Progress updates have zero events in WatchResponse.
func WithProgressNotify() OpOption {
	return func(op *Op) {
		op.progressNotify = true
	}
}

// WithCreatedNotify makes the watch server send the created event.
func WithCreatedNotify() OpOption {
	return func(op *Op) {
		op.createdNotify = true
	}
}

// WithFilterPut discards PUT events from the watcher.
func WithFilterPut() OpOption {
	return func(op *Op) { op.filterPut = true }
}

// WithFilterDelete discards DELETE events from the watcher.
func WithFilterDelete() OpOption {
	return func(op *Op) { op.filterDelete = true }
}

// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
// nothing will be returned.
func WithPrevKV() OpOption {
	return func(op *Op) {
		op.prevKV = true
	}
}

// WithIgnoreValue updates the key using its current value.
// An empty value should be passed when ignore_value is set.
// Returns an error if the key does not exist.
func WithIgnoreValue() OpOption {
	return func(op *Op) {
		op.ignoreValue = true
	}
}

// WithIgnoreLease updates the key using its current lease.
// An empty lease should be passed when ignore_lease is set.
// Returns an error if the key does not exist.
func WithIgnoreLease() OpOption {
	return func(op *Op) {
		op.ignoreLease = true
	}
}

// LeaseOp represents an Operation that lease can execute.
type LeaseOp struct {
	id LeaseID

	// for TimeToLive
	attachedKeys bool
}

// LeaseOption configures lease operations.
type LeaseOption func(*LeaseOp)

func (op *LeaseOp) applyOpts(opts []LeaseOption) {
	for _, opt := range opts {
		opt(op)
	}
}

// WithAttachedKeys requests lease timetolive API to return
// attached keys of given lease ID.
func WithAttachedKeys() LeaseOption {
	return func(op *LeaseOp) { op.attachedKeys = true }
}

func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
	ret := &LeaseOp{id: id}
	ret.applyOpts(opts)
	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
}
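Not part of the commit: the option constructors above all mutate a single Op under construction, so they compose freely on one Get call. A minimal sketch of that composition; the variable name cli and the "foo" prefix are assumptions for illustration.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

// newestUnderFoo ranges over every key with the "foo" prefix, newest
// creations first, capped at ten results; each With* option simply
// fills in another field of the Op before it becomes a RangeRequest.
func newestUnderFoo(cli *clientv3.Client) {
	resp, err := cli.Get(context.Background(), "foo",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortDescend),
		clientv3.WithLimit(10),
	)
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}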
38 vendor/github.com/coreos/etcd/clientv3.old/op_test.go generated vendored Normal file
@ -0,0 +1,38 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"reflect"
	"testing"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// TestOpWithSort tests that, if WithSort(ASCEND, KEY) and WithLimit are
// specified, the RangeRequest ignores the SortOption to avoid unnecessarily
// fetching the entire key-space.
func TestOpWithSort(t *testing.T) {
	opReq := OpGet("foo", WithSort(SortByKey, SortAscend), WithLimit(10)).toRequestOp().Request
	q, ok := opReq.(*pb.RequestOp_RequestRange)
	if !ok {
		t.Fatalf("expected range request, got %v", reflect.TypeOf(opReq))
	}
	req := q.RequestRange
	wreq := &pb.RangeRequest{Key: []byte("foo"), SortOrder: pb.RangeRequest_NONE, Limit: 10}
	if !reflect.DeepEqual(req, wreq) {
		t.Fatalf("expected %+v, got %+v", wreq, req)
	}
}
293 vendor/github.com/coreos/etcd/clientv3.old/retry.go generated vendored Normal file
@ -0,0 +1,293 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc) error

func (c *Client) newRetryWrapper() retryRpcFunc {
	return func(rpcCtx context.Context, f rpcFunc) error {
		for {
			err := f(rpcCtx)
			if err == nil {
				return nil
			}

			eErr := rpctypes.Error(err)
			// always stop retry on etcd errors
			if _, ok := eErr.(rpctypes.EtcdError); ok {
				return err
			}

			// only retry if unavailable
			if grpc.Code(err) != codes.Unavailable {
				return err
			}

			select {
			case <-c.balancer.ConnectNotify():
			case <-rpcCtx.Done():
				return rpcCtx.Err()
			case <-c.ctx.Done():
				return c.ctx.Err()
			}
		}
	}
}

func (c *Client) newAuthRetryWrapper() retryRpcFunc {
	return func(rpcCtx context.Context, f rpcFunc) error {
		for {
			err := f(rpcCtx)
			if err == nil {
				return nil
			}

			// always stop retry on etcd errors other than invalid auth token
			if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
				gterr := c.getToken(rpcCtx)
				if gterr != nil {
					return err // return the original error for simplicity
				}
				continue
			}

			return err
		}
	}
}

// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient {
	retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
	return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
}

type retryKVClient struct {
	*retryWriteKVClient
}

func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
		return err
	})
	return resp, err
}

type retryWriteKVClient struct {
	pb.KVClient
	retryf retryRpcFunc
}

func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Put(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Txn(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
	err = rkv.retryf(ctx, func(rctx context.Context) error {
		resp, err = rkv.KVClient.Compact(rctx, in, opts...)
		return err
	})
	return resp, err
}

type retryLeaseClient struct {
	pb.LeaseClient
	retryf retryRpcFunc
}

// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
func RetryLeaseClient(c *Client) pb.LeaseClient {
	retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
	return &retryLeaseClient{retry, c.retryAuthWrapper}
}

func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
	err = rlc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
		return err
	})
	return resp, err
}

type retryClusterClient struct {
	pb.ClusterClient
	retryf retryRpcFunc
}

// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
func RetryClusterClient(c *Client) pb.ClusterClient {
	return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
}

func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
	err = rcc.retryf(ctx, func(rctx context.Context) error {
		resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
		return err
	})
	return resp, err
}

type retryAuthClient struct {
	pb.AuthClient
	retryf retryRpcFunc
}

// RetryAuthClient implements an AuthClient that uses the client's FailFast retry policy.
func RetryAuthClient(c *Client) pb.AuthClient {
	return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
}

func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
		return err
	})
	return resp, err
}

func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
	err = rac.retryf(ctx, func(rctx context.Context) error {
		resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
		return err
	})
	return resp, err
}
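Not part of the commit: callers reach the wrapper above through the exported Retry*Client constructors, which keep reissuing an RPC while it fails with codes.Unavailable and the balancer has not yet reconnected. A hedged sketch of that usage; cli and the key/value literals are assumptions for illustration.

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// putWithRetry issues a raw KV Put through the retrying client; etcd-level
// errors and non-Unavailable gRPC codes still fail immediately, per the
// newRetryWrapper logic above.
func putWithRetry(cli *clientv3.Client) {
	kvc := clientv3.RetryKVClient(cli)
	if _, err := kvc.Put(context.Background(),
		&pb.PutRequest{Key: []byte("k"), Value: []byte("v")}); err != nil {
		log.Fatal(err)
	}
}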
37 vendor/github.com/coreos/etcd/clientv3.old/sort.go generated vendored Normal file
@ -0,0 +1,37 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

type SortTarget int
type SortOrder int

const (
	SortNone SortOrder = iota
	SortAscend
	SortDescend
)

const (
	SortByKey SortTarget = iota
	SortByVersion
	SortByCreateRevision
	SortByModRevision
	SortByValue
)

type SortOption struct {
	Target SortTarget
	Order  SortOrder
}
164 vendor/github.com/coreos/etcd/clientv3.old/txn.go generated vendored Normal file
@ -0,0 +1,164 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"sync"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// Txn is the interface that wraps mini-transactions.
//
//	Tx.If(
//	 Compare(Value(k1), ">", v1),
//	 Compare(Version(k1), "=", 2)
//	).Then(
//	 OpPut(k2,v2), OpPut(k3,v3)
//	).Else(
//	 OpPut(k4,v4), OpPut(k5,v5)
//	).Commit()
//
type Txn interface {
	// If takes a list of comparisons. If all comparisons passed in succeed,
	// the operations passed into Then() will be executed. Otherwise the
	// operations passed into Else() will be executed.
	If(cs ...Cmp) Txn

	// Then takes a list of operations. The Ops list will be executed, if the
	// comparisons passed in If() succeed.
	Then(ops ...Op) Txn

	// Else takes a list of operations. The Ops list will be executed, if the
	// comparisons passed in If() fail.
	Else(ops ...Op) Txn

	// Commit tries to commit the transaction.
	Commit() (*TxnResponse, error)
}

type txn struct {
	kv  *kv
	ctx context.Context

	mu    sync.Mutex
	cif   bool
	cthen bool
	celse bool

	isWrite bool

	cmps []*pb.Compare

	sus []*pb.RequestOp
	fas []*pb.RequestOp
}

func (txn *txn) If(cs ...Cmp) Txn {
	txn.mu.Lock()
	defer txn.mu.Unlock()

	if txn.cif {
		panic("cannot call If twice!")
	}

	if txn.cthen {
		panic("cannot call If after Then!")
	}

	if txn.celse {
		panic("cannot call If after Else!")
	}

	txn.cif = true

	for i := range cs {
		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
	}

	return txn
}

func (txn *txn) Then(ops ...Op) Txn {
	txn.mu.Lock()
	defer txn.mu.Unlock()

	if txn.cthen {
		panic("cannot call Then twice!")
	}
	if txn.celse {
		panic("cannot call Then after Else!")
	}

	txn.cthen = true

	for _, op := range ops {
		txn.isWrite = txn.isWrite || op.isWrite()
		txn.sus = append(txn.sus, op.toRequestOp())
	}

	return txn
}

func (txn *txn) Else(ops ...Op) Txn {
	txn.mu.Lock()
	defer txn.mu.Unlock()

	if txn.celse {
		panic("cannot call Else twice!")
	}

	txn.celse = true

	for _, op := range ops {
		txn.isWrite = txn.isWrite || op.isWrite()
		txn.fas = append(txn.fas, op.toRequestOp())
	}

	return txn
}

func (txn *txn) Commit() (*TxnResponse, error) {
	txn.mu.Lock()
	defer txn.mu.Unlock()
	for {
		resp, err := txn.commit()
		if err == nil {
			return resp, err
		}
		if isHaltErr(txn.ctx, err) {
			return nil, toErr(txn.ctx, err)
		}
		if txn.isWrite {
			return nil, toErr(txn.ctx, err)
		}
	}
}

func (txn *txn) commit() (*TxnResponse, error) {
	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}

	var opts []grpc.CallOption
	if !txn.isWrite {
		opts = []grpc.CallOption{grpc.FailFast(false)}
	}
	resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	return (*TxnResponse)(resp), nil
}
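Not part of the commit: the If/Then/Else shape documented on the Txn interface above translates into a single server-side transaction. A small sketch, assuming an existing client cli and an illustrative key "k": create the key only if it has never existed, otherwise read it back.

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
)

// createOnce puts "k" only when its create revision is still zero
// (i.e. the key has never been created); otherwise the Else branch reads it.
func createOnce(cli *clientv3.Client) {
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.CreateRevision("k"), "=", 0)).
		Then(clientv3.OpPut("k", "v")).
		Else(clientv3.OpGet("k")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("took Then branch:", resp.Succeeded)
}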
105 vendor/github.com/coreos/etcd/clientv3.old/txn_test.go generated vendored Normal file
@ -0,0 +1,105 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"testing"
	"time"

	"github.com/coreos/etcd/pkg/testutil"
)

func TestTxnPanics(t *testing.T) {
	defer testutil.AfterTest(t)

	kv := &kv{}

	errc := make(chan string)
	df := func() {
		if s := recover(); s != nil {
			errc <- s.(string)
		}
	}

	cmp := Compare(CreateRevision("foo"), "=", 0)
	op := OpPut("foo", "bar")

	tests := []struct {
		f   func()
		err string
	}{
		{
			f: func() {
				defer df()
				kv.Txn(nil).If(cmp).If(cmp)
			},
			err: "cannot call If twice!",
		},
		{
			f: func() {
				defer df()
				kv.Txn(nil).Then(op).If(cmp)
			},
			err: "cannot call If after Then!",
		},
		{
			f: func() {
				defer df()
				kv.Txn(nil).Else(op).If(cmp)
			},
			err: "cannot call If after Else!",
		},
		{
			f: func() {
				defer df()
				kv.Txn(nil).Then(op).Then(op)
			},
			err: "cannot call Then twice!",
		},
		{
			f: func() {
				defer df()
				kv.Txn(nil).Else(op).Then(op)
			},
			err: "cannot call Then after Else!",
		},
		{
			f: func() {
				defer df()
				kv.Txn(nil).Else(op).Else(op)
			},
			err: "cannot call Else twice!",
		},
	}

	for i, tt := range tests {
		go tt.f()
		select {
		case err := <-errc:
			if err != tt.err {
				t.Errorf("#%d: got %s, wanted %s", i, err, tt.err)
			}
		case <-time.After(time.Second):
			t.Errorf("#%d: did not panic, wanted panic %s", i, tt.err)
		}
	}
}
796 vendor/github.com/coreos/etcd/clientv3.old/watch.go generated vendored Normal file
@ -0,0 +1,796 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"fmt"
	"sync"
	"time"

	v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

const (
	EventTypeDelete = mvccpb.DELETE
	EventTypePut    = mvccpb.PUT

	closeSendErrTimeout = 250 * time.Millisecond
)

type Event mvccpb.Event

type WatchChan <-chan WatchResponse

type Watcher interface {
	// Watch watches on a key or prefix. The watched events will be returned
	// through the returned channel. If revisions waiting to be sent over the
	// watch are compacted, then the watch will be canceled by the server, the
	// client will post a compacted error watch response, and the channel will close.
	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan

	// Close closes the watcher and cancels all watch requests.
	Close() error
}

type WatchResponse struct {
	Header pb.ResponseHeader
	Events []*Event

	// CompactRevision is the minimum revision the watcher may receive.
	CompactRevision int64

	// Canceled is used to indicate watch failure.
	// If the watch failed and the stream was about to close, before the channel is closed,
	// the channel sends a final response that has Canceled set to true with a non-nil Err().
	Canceled bool

	// Created is used to indicate the creation of the watcher.
	Created bool

	closeErr error

	// cancelReason is a reason of canceling watch
	cancelReason string
}

// IsCreate returns true if the event tells that the key is newly created.
func (e *Event) IsCreate() bool {
	return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
}

// IsModify returns true if the event tells that a new value is put on existing key.
func (e *Event) IsModify() bool {
	return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
}

// Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error {
	switch {
	case wr.closeErr != nil:
		return v3rpc.Error(wr.closeErr)
	case wr.CompactRevision != 0:
		return v3rpc.ErrCompacted
	case wr.Canceled:
		if len(wr.cancelReason) != 0 {
			return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason))
		}
		return v3rpc.ErrFutureRev
	}
	return nil
}

// IsProgressNotify returns true if the WatchResponse is progress notification.
func (wr *WatchResponse) IsProgressNotify() bool {
	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
}

// watcher implements the Watcher interface
type watcher struct {
	remote pb.WatchClient

	// mu protects the grpc streams map
	mu sync.RWMutex

	// streams holds all the active grpc streams keyed by ctx value.
	streams map[string]*watchGrpcStream
}

// watchGrpcStream tracks all watch resources attached to a single grpc stream.
type watchGrpcStream struct {
	owner  *watcher
	remote pb.WatchClient

	// ctx controls internal remote.Watch requests
	ctx context.Context
	// ctxKey is the key used when looking up this stream's context
	ctxKey string
	cancel context.CancelFunc

	// substreams holds all active watchers on this grpc stream
	substreams map[int64]*watcherStream
	// resuming holds all resuming watchers on this grpc stream
	resuming []*watcherStream

	// reqc sends a watch request from Watch() to the main goroutine
	reqc chan *watchRequest
	// respc receives data from the watch client
	respc chan *pb.WatchResponse
	// donec closes to broadcast shutdown
	donec chan struct{}
	// errc transmits errors from grpc Recv to the watch stream reconn logic
	errc chan error
	// closingc gets the watcherStream of closing watchers
	closingc chan *watcherStream
	// wg is Done when all substream goroutines have exited
	wg sync.WaitGroup

	// resumec closes to signal that all substreams should begin resuming
	resumec chan struct{}
	// closeErr is the error that closed the watch stream
	closeErr error
}

// watchRequest is issued by the subscriber to start a new watcher
type watchRequest struct {
	ctx context.Context
	key string
	end string
	rev int64
	// send created notification event if this field is true
	createdNotify bool
	// progressNotify is for progress updates
	progressNotify bool
	// filters is the list of events to filter out
	filters []pb.WatchCreateRequest_FilterType
	// get the previous key-value pair before the event happens
	prevKV bool
	// retc receives a chan WatchResponse once the watcher is established
	retc chan chan WatchResponse
}

// watcherStream represents a registered watcher
type watcherStream struct {
	// initReq is the request that initiated this request
	initReq watchRequest

	// outc publishes watch responses to subscriber
	outc chan WatchResponse
	// recvc buffers watch responses before publishing
	recvc chan *WatchResponse
	// donec closes when the watcherStream goroutine stops.
	donec chan struct{}
	// closing is set to true when stream should be scheduled to shutdown.
	closing bool
	// id is the registered watch id on the grpc stream
	id int64

	// buf holds all events received from etcd but not yet consumed by the client
	buf []*WatchResponse
}

func NewWatcher(c *Client) Watcher {
	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
}

func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
	return &watcher{
		remote:  wc,
		streams: make(map[string]*watchGrpcStream),
	}
}

// never closes
var valCtxCh = make(chan struct{})
var zeroTime = time.Unix(0, 0)

// ctx with only the values; never Done
type valCtx struct{ context.Context }

func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
func (vc *valCtx) Done() <-chan struct{}       { return valCtxCh }
func (vc *valCtx) Err() error                  { return nil }

func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
	ctx, cancel := context.WithCancel(&valCtx{inctx})
	wgs := &watchGrpcStream{
		owner:      w,
		remote:     w.remote,
		ctx:        ctx,
		ctxKey:     fmt.Sprintf("%v", inctx),
		cancel:     cancel,
		substreams: make(map[int64]*watcherStream),

		respc:    make(chan *pb.WatchResponse),
		reqc:     make(chan *watchRequest),
		donec:    make(chan struct{}),
		errc:     make(chan error, 1),
		closingc: make(chan *watcherStream),
		resumec:  make(chan struct{}),
	}
	go wgs.run()
	return wgs
}

// Watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
	ow := opWatch(key, opts...)

	var filters []pb.WatchCreateRequest_FilterType
	if ow.filterPut {
		filters = append(filters, pb.WatchCreateRequest_NOPUT)
	}
	if ow.filterDelete {
		filters = append(filters, pb.WatchCreateRequest_NODELETE)
	}

	wr := &watchRequest{
		ctx:            ctx,
		createdNotify:  ow.createdNotify,
		key:            string(ow.key),
		end:            string(ow.end),
		rev:            ow.rev,
		progressNotify: ow.progressNotify,
		filters:        filters,
		prevKV:         ow.prevKV,
		retc:           make(chan chan WatchResponse, 1),
	}

	ok := false
	ctxKey := fmt.Sprintf("%v", ctx)

	// find or allocate appropriate grpc watch stream
	w.mu.Lock()
	if w.streams == nil {
		// closed
		w.mu.Unlock()
		ch := make(chan WatchResponse)
		close(ch)
		return ch
	}
	wgs := w.streams[ctxKey]
	if wgs == nil {
		wgs = w.newWatcherGrpcStream(ctx)
		w.streams[ctxKey] = wgs
	}
	donec := wgs.donec
	reqc := wgs.reqc
	w.mu.Unlock()

	// couldn't create channel; return closed channel
	closeCh := make(chan WatchResponse, 1)

	// submit request
	select {
	case reqc <- wr:
		ok = true
	case <-wr.ctx.Done():
	case <-donec:
		if wgs.closeErr != nil {
			closeCh <- WatchResponse{closeErr: wgs.closeErr}
			break
		}
		// retry; may have dropped stream from no ctxs
		return w.Watch(ctx, key, opts...)
	}

	// receive channel
	if ok {
		select {
		case ret := <-wr.retc:
			return ret
		case <-ctx.Done():
		case <-donec:
			if wgs.closeErr != nil {
				closeCh <- WatchResponse{closeErr: wgs.closeErr}
				break
			}
			// retry; may have dropped stream from no ctxs
			return w.Watch(ctx, key, opts...)
		}
	}

	close(closeCh)
	return closeCh
}

func (w *watcher) Close() (err error) {
	w.mu.Lock()
	streams := w.streams
	w.streams = nil
	w.mu.Unlock()
	for _, wgs := range streams {
		if werr := wgs.close(); werr != nil {
			err = werr
		}
	}
	return err
}

func (w *watchGrpcStream) close() (err error) {
	w.cancel()
	<-w.donec
	select {
	case err = <-w.errc:
	default:
	}
	return toErr(w.ctx, err)
}

func (w *watcher) closeStream(wgs *watchGrpcStream) {
	w.mu.Lock()
	close(wgs.donec)
	wgs.cancel()
	if w.streams != nil {
		delete(w.streams, wgs.ctxKey)
	}
	w.mu.Unlock()
}

func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
	if resp.WatchId == -1 {
		// failed; no channel
		close(ws.recvc)
		return
	}
	ws.id = resp.WatchId
	w.substreams[ws.id] = ws
}

func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
	select {
	case ws.outc <- *resp:
	case <-ws.initReq.ctx.Done():
	case <-time.After(closeSendErrTimeout):
	}
	close(ws.outc)
}

func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
	// send channel response in case stream was never established
	select {
	case ws.initReq.retc <- ws.outc:
	default:
	}
	// close subscriber's channel
	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
		go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
	} else if ws.outc != nil {
		close(ws.outc)
	}
	if ws.id != -1 {
		delete(w.substreams, ws.id)
		return
	}
	for i := range w.resuming {
		if w.resuming[i] == ws {
			w.resuming[i] = nil
			return
		}
	}
}

// run is the root of the goroutines for managing a watcher client
func (w *watchGrpcStream) run() {
	var wc pb.Watch_WatchClient
	var closeErr error

	// substreams marked to close but goroutine still running; needed for
	// avoiding double-closing recvc on grpc stream teardown
	closing := make(map[*watcherStream]struct{})

	defer func() {
		w.closeErr = closeErr
		// shutdown substreams and resuming substreams
		for _, ws := range w.substreams {
			if _, ok := closing[ws]; !ok {
				close(ws.recvc)
				closing[ws] = struct{}{}
			}
		}
		for _, ws := range w.resuming {
			if _, ok := closing[ws]; ws != nil && !ok {
				close(ws.recvc)
				closing[ws] = struct{}{}
			}
		}
		w.joinSubstreams()
		for range closing {
			w.closeSubstream(<-w.closingc)
		}
		w.wg.Wait()
		w.owner.closeStream(w)
	}()

	// start a stream with the etcd grpc server
	if wc, closeErr = w.newWatchClient(); closeErr != nil {
		return
	}

	cancelSet := make(map[int64]struct{})

	for {
		select {
		// Watch() requested
		case wreq := <-w.reqc:
			outc := make(chan WatchResponse, 1)
			ws := &watcherStream{
				initReq: *wreq,
				id:      -1,
				outc:    outc,
				// unbuffered so resumes won't cause repeat events
				recvc: make(chan *WatchResponse),
			}

			ws.donec = make(chan struct{})
			w.wg.Add(1)
			go w.serveSubstream(ws, w.resumec)

			// queue up for watcher creation/resume
			w.resuming = append(w.resuming, ws)
			if len(w.resuming) == 1 {
				// head of resume queue, can register a new watcher
				wc.Send(ws.initReq.toPB())
			}
		// New events from the watch client
		case pbresp := <-w.respc:
			switch {
			case pbresp.Created:
				// response to head of queue creation
				if ws := w.resuming[0]; ws != nil {
					w.addSubstream(pbresp, ws)
					w.dispatchEvent(pbresp)
					w.resuming[0] = nil
				}
				if ws := w.nextResume(); ws != nil {
					wc.Send(ws.initReq.toPB())
				}
			case pbresp.Canceled && pbresp.CompactRevision == 0:
				delete(cancelSet, pbresp.WatchId)
				if ws, ok := w.substreams[pbresp.WatchId]; ok {
					// signal to stream goroutine to update closingc
					close(ws.recvc)
					closing[ws] = struct{}{}
				}
			default:
				// dispatch to appropriate watch stream
				if ok := w.dispatchEvent(pbresp); ok {
					break
				}
				// watch response on unexpected watch id; cancel id
				if _, ok := cancelSet[pbresp.WatchId]; ok {
					break
				}
				cancelSet[pbresp.WatchId] = struct{}{}
				cr := &pb.WatchRequest_CancelRequest{
					CancelRequest: &pb.WatchCancelRequest{
						WatchId: pbresp.WatchId,
					},
				}
				req := &pb.WatchRequest{RequestUnion: cr}
				wc.Send(req)
			}
		// watch client failed to recv; spawn another if possible
		case err := <-w.errc:
			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
				closeErr = err
				return
			}
			if wc, closeErr = w.newWatchClient(); closeErr != nil {
				return
			}
			if ws := w.nextResume(); ws != nil {
				wc.Send(ws.initReq.toPB())
			}
			cancelSet = make(map[int64]struct{})
		case <-w.ctx.Done():
			return
		case ws := <-w.closingc:
			w.closeSubstream(ws)
			delete(closing, ws)
			if len(w.substreams)+len(w.resuming) == 0 {
				// no more watchers on this stream, shutdown
				return
			}
		}
	}
}

// nextResume chooses the next resuming to register with the grpc stream. Abandoned
// streams are marked as nil in the queue since the head must wait for its inflight registration.
func (w *watchGrpcStream) nextResume() *watcherStream {
	for len(w.resuming) != 0 {
		if w.resuming[0] != nil {
			return w.resuming[0]
		}
		w.resuming = w.resuming[1:len(w.resuming)]
	}
	return nil
}

// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
	events := make([]*Event, len(pbresp.Events))
	for i, ev := range pbresp.Events {
		events[i] = (*Event)(ev)
	}
	wr := &WatchResponse{
		Header:          *pbresp.Header,
		Events:          events,
		CompactRevision: pbresp.CompactRevision,
		Created:         pbresp.Created,
		Canceled:        pbresp.Canceled,
		cancelReason:    pbresp.CancelReason,
	}
	ws, ok := w.substreams[pbresp.WatchId]
	if !ok {
		return false
	}
	select {
	case ws.recvc <- wr:
	case <-ws.donec:
		return false
	}
	return true
}

// serveWatchClient forwards messages from the grpc stream to run()
func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
	for {
		resp, err := wc.Recv()
		if err != nil {
			select {
			case w.errc <- err:
			case <-w.donec:
			}
			return
		}
		select {
		case w.respc <- resp:
		case <-w.donec:
			return
		}
	}
}

// serveSubstream forwards watch responses from run() to the subscriber
func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
	if ws.closing {
		panic("created substream goroutine but substream is closing")
	}

	// nextRev is the minimum expected next revision
	nextRev := ws.initReq.rev
	resuming := false
	defer func() {
		if !resuming {
			ws.closing = true
		}
		close(ws.donec)
		if !resuming {
			w.closingc <- ws
		}
		w.wg.Done()
	}()

	emptyWr := &WatchResponse{}
	for {
		curWr := emptyWr
		outc := ws.outc

		if len(ws.buf) > 0 {
			curWr = ws.buf[0]
		} else {
			outc = nil
		}
		select {
		case outc <- *curWr:
			if ws.buf[0].Err() != nil {
				return
			}
			ws.buf[0] = nil
			ws.buf = ws.buf[1:]
		case wr, ok := <-ws.recvc:
			if !ok {
				// shutdown from closeSubstream
				return
			}

			if wr.Created {
				if ws.initReq.retc != nil {
					ws.initReq.retc <- ws.outc
					// to prevent next write from taking the slot in buffered channel
					// and posting duplicate create events
					ws.initReq.retc = nil

					// send first creation event only if requested
					if ws.initReq.createdNotify {
						ws.outc <- *wr
					}
					// once the watch channel is returned, a current revision
					// watch must resume at the store revision. This is necessary
					// for the following case to work as expected:
					//	wch := m1.Watch("a")
					//	m2.Put("a", "b")
					//	<-wch
					// If the revision is only bound on the first observed event,
					// if wch is disconnected before the Put is issued, then reconnects
					// after it is committed, it'll miss the Put.
					if ws.initReq.rev == 0 {
						nextRev = wr.Header.Revision
					}
				}
			} else {
				// current progress of watch; <= store revision
				nextRev = wr.Header.Revision
			}

			if len(wr.Events) > 0 {
				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
			}
			ws.initReq.rev = nextRev

			// created event is already sent above,
			// watcher should not post duplicate events
			if wr.Created {
				continue
			}

			// TODO pause channel if buffer gets too large
			ws.buf = append(ws.buf, wr)
		case <-w.ctx.Done():
			return
		case <-ws.initReq.ctx.Done():
			return
		case <-resumec:
			resuming = true
			return
		}
	}
	// lazily send cancel message if events on missing id
}

func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
	// mark all substreams as resuming
	close(w.resumec)
	w.resumec = make(chan struct{})
	w.joinSubstreams()
	for _, ws := range w.substreams {
		ws.id = -1
		w.resuming = append(w.resuming, ws)
	}
	// strip out nils, if any
	var resuming []*watcherStream
	for _, ws := range w.resuming {
		if ws != nil {
			resuming = append(resuming, ws)
		}
	}
	w.resuming = resuming
	w.substreams = make(map[int64]*watcherStream)

	// connect to grpc stream while accepting watcher cancelation
	stopc := make(chan struct{})
	donec := w.waitCancelSubstreams(stopc)
	wc, err := w.openWatchClient()
	close(stopc)
	<-donec

	// serve all non-closing streams, even if there's a client error
	// so that the teardown path can shutdown the streams as expected.
	for _, ws := range w.resuming {
		if ws.closing {
			continue
		}
		ws.donec = make(chan struct{})
		w.wg.Add(1)
		go w.serveSubstream(ws, w.resumec)
	}

	if err != nil {
		return nil, v3rpc.Error(err)
	}

	// receive data from new grpc stream
	go w.serveWatchClient(wc)
	return wc, nil
}

func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
	var wg sync.WaitGroup
	wg.Add(len(w.resuming))
	donec := make(chan struct{})
	for i := range w.resuming {
		go func(ws *watcherStream) {
			defer wg.Done()
			if ws.closing {
				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
					close(ws.outc)
					ws.outc = nil
				}
				return
			}
			select {
			case <-ws.initReq.ctx.Done():
				// closed ws will be removed from resuming
				ws.closing = true
				close(ws.outc)
				ws.outc = nil
				w.wg.Add(1)
				go func() {
					defer w.wg.Done()
					w.closingc <- ws
				}()
			case <-stopc:
|
||||
}
|
||||
}(w.resuming[i])
|
||||
}
|
||||
go func() {
|
||||
defer close(donec)
|
||||
wg.Wait()
|
||||
}()
|
||||
return donec
|
||||
}
|
||||
|
||||
// joinSubstream waits for all substream goroutines to complete
|
||||
func (w *watchGrpcStream) joinSubstreams() {
|
||||
for _, ws := range w.substreams {
|
||||
<-ws.donec
|
||||
}
|
||||
for _, ws := range w.resuming {
|
||||
if ws != nil {
|
||||
<-ws.donec
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// openWatchClient retries opening a watchclient until retryConnection fails
|
||||
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
|
||||
for {
|
||||
select {
|
||||
case <-w.ctx.Done():
|
||||
if err == nil {
|
||||
return nil, w.ctx.Err()
|
||||
}
|
||||
return nil, err
|
||||
default:
|
||||
}
|
||||
if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
|
||||
break
|
||||
}
|
||||
if isHaltErr(w.ctx, err) {
|
||||
return nil, v3rpc.Error(err)
|
||||
}
|
||||
}
|
||||
return ws, nil
|
||||
}
|
||||
|
||||
// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest)
|
||||
func (wr *watchRequest) toPB() *pb.WatchRequest {
|
||||
req := &pb.WatchCreateRequest{
|
||||
StartRevision: wr.rev,
|
||||
Key: []byte(wr.key),
|
||||
RangeEnd: []byte(wr.end),
|
||||
ProgressNotify: wr.progressNotify,
|
||||
Filters: wr.filters,
|
||||
PrevKv: wr.prevKV,
|
||||
}
|
||||
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
|
||||
return &pb.WatchRequest{RequestUnion: cr}
|
||||
}
|
55
vendor/github.com/coreos/etcd/clientv3.old/watch_test.go
generated
vendored
Normal file
55
vendor/github.com/coreos/etcd/clientv3.old/watch_test.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package clientv3
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
)
|
||||
|
||||
func TestEvent(t *testing.T) {
|
||||
tests := []struct {
|
||||
ev *Event
|
||||
isCreate bool
|
||||
isModify bool
|
||||
}{{
|
||||
ev: &Event{
|
||||
Type: EventTypePut,
|
||||
Kv: &mvccpb.KeyValue{
|
||||
CreateRevision: 3,
|
||||
ModRevision: 3,
|
||||
},
|
||||
},
|
||||
isCreate: true,
|
||||
}, {
|
||||
ev: &Event{
|
||||
Type: EventTypePut,
|
||||
Kv: &mvccpb.KeyValue{
|
||||
CreateRevision: 3,
|
||||
ModRevision: 4,
|
||||
},
|
||||
},
|
||||
isModify: true,
|
||||
}}
|
||||
for i, tt := range tests {
|
||||
if tt.isCreate && !tt.ev.IsCreate() {
|
||||
t.Errorf("#%d: event should be Create event", i)
|
||||
}
|
||||
if tt.isModify && !tt.ev.IsModify() {
|
||||
t.Errorf("#%d: event should be Modify event", i)
|
||||
}
|
||||
}
|
||||
}
|
87
vendor/github.com/coreos/etcd/clientv3.old/yaml/config.go
generated
vendored
Normal file
87
vendor/github.com/coreos/etcd/clientv3.old/yaml/config.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package yaml handles yaml-formatted clientv3 configuration data.
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/pkg/tlsutil"
|
||||
)
|
||||
|
||||
type yamlConfig struct {
|
||||
clientv3.Config
|
||||
|
||||
InsecureTransport bool `json:"insecure-transport"`
|
||||
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
|
||||
Certfile string `json:"cert-file"`
|
||||
Keyfile string `json:"key-file"`
|
||||
CAfile string `json:"ca-file"`
|
||||
}
|
||||
|
||||
// NewConfig creates a new clientv3.Config from a yaml file.
|
||||
func NewConfig(fpath string) (*clientv3.Config, error) {
|
||||
b, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
yc := &yamlConfig{}
|
||||
|
||||
err = yaml.Unmarshal(b, yc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if yc.InsecureTransport {
|
||||
return &yc.Config, nil
|
||||
}
|
||||
|
||||
var (
|
||||
cert *tls.Certificate
|
||||
cp *x509.CertPool
|
||||
)
|
||||
|
||||
if yc.Certfile != "" && yc.Keyfile != "" {
|
||||
cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if yc.CAfile != "" {
|
||||
cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tlscfg := &tls.Config{
|
||||
MinVersion: tls.VersionTLS12,
|
||||
InsecureSkipVerify: yc.InsecureSkipTLSVerify,
|
||||
RootCAs: cp,
|
||||
}
|
||||
if cert != nil {
|
||||
tlscfg.Certificates = []tls.Certificate{*cert}
|
||||
}
|
||||
yc.Config.TLS = tlscfg
|
||||
|
||||
return &yc.Config, nil
|
||||
}
|
126
vendor/github.com/coreos/etcd/clientv3.old/yaml/config_test.go
generated
vendored
Normal file
126
vendor/github.com/coreos/etcd/clientv3.old/yaml/config_test.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
)
|
||||
|
||||
var (
|
||||
certPath = "../../integration/fixtures/server.crt"
|
||||
privateKeyPath = "../../integration/fixtures/server.key.insecure"
|
||||
caPath = "../../integration/fixtures/ca.crt"
|
||||
)
|
||||
|
||||
func TestConfigFromFile(t *testing.T) {
|
||||
tests := []struct {
|
||||
ym *yamlConfig
|
||||
|
||||
werr bool
|
||||
}{
|
||||
{
|
||||
&yamlConfig{},
|
||||
false,
|
||||
},
|
||||
{
|
||||
&yamlConfig{
|
||||
InsecureTransport: true,
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
&yamlConfig{
|
||||
Keyfile: privateKeyPath,
|
||||
Certfile: certPath,
|
||||
CAfile: caPath,
|
||||
InsecureSkipTLSVerify: true,
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
&yamlConfig{
|
||||
Keyfile: "bad",
|
||||
Certfile: "bad",
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
&yamlConfig{
|
||||
Keyfile: privateKeyPath,
|
||||
Certfile: certPath,
|
||||
CAfile: "bad",
|
||||
},
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
tmpfile, err := ioutil.TempFile("", "clientcfg")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
b, err := yaml.Marshal(tt.ym)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = tmpfile.Write(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = tmpfile.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg, cerr := NewConfig(tmpfile.Name())
|
||||
if cerr != nil && !tt.werr {
|
||||
t.Errorf("#%d: err = %v, want %v", i, cerr, tt.werr)
|
||||
continue
|
||||
}
|
||||
if cerr != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(cfg.Endpoints, tt.ym.Endpoints) {
|
||||
t.Errorf("#%d: endpoint = %v, want %v", i, cfg.Endpoints, tt.ym.Endpoints)
|
||||
}
|
||||
|
||||
if tt.ym.InsecureTransport != (cfg.TLS == nil) {
|
||||
t.Errorf("#%d: insecureTransport = %v, want %v", i, cfg.TLS == nil, tt.ym.InsecureTransport)
|
||||
}
|
||||
|
||||
if !tt.ym.InsecureTransport {
|
||||
if tt.ym.Certfile != "" && len(cfg.TLS.Certificates) == 0 {
|
||||
t.Errorf("#%d: failed to load in cert", i)
|
||||
}
|
||||
if tt.ym.CAfile != "" && cfg.TLS.RootCAs == nil {
|
||||
t.Errorf("#%d: failed to load in ca cert", i)
|
||||
}
|
||||
if cfg.TLS.InsecureSkipVerify != tt.ym.InsecureSkipTLSVerify {
|
||||
t.Errorf("#%d: skipTLSVeify = %v, want %v", i, cfg.TLS.InsecureSkipVerify, tt.ym.InsecureSkipTLSVerify)
|
||||
}
|
||||
}
|
||||
|
||||
os.Remove(tmpfile.Name())
|
||||
}
|
||||
}
|
85
vendor/github.com/coreos/etcd/clientv3/README.md
generated
vendored
Normal file
85
vendor/github.com/coreos/etcd/clientv3/README.md
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
# etcd/clientv3
|
||||
|
||||
[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
|
||||
|
||||
`etcd/clientv3` is the official Go etcd client for v3.
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get github.com/coreos/etcd/clientv3
|
||||
```
|
||||
|
||||
## Get started
|
||||
|
||||
Create client using `clientv3.New`:
|
||||
|
||||
```go
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
|
||||
DialTimeout: 5 * time.Second,
|
||||
})
|
||||
if err != nil {
|
||||
// handle error!
|
||||
}
|
||||
defer cli.Close()
|
||||
```
|
||||
|
||||
etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses
|
||||
[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it.
|
||||
If the client is not closed, the connection will have leaky goroutines. To specify client request timeout,
|
||||
pass `context.WithTimeout` to APIs:
|
||||
|
||||
```go
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
resp, err := cli.Put(ctx, "sample_key", "sample_value")
|
||||
cancel()
|
||||
if err != nil {
|
||||
// handle error!
|
||||
}
|
||||
// use the response
|
||||
```
|
||||
|
||||
etcd uses `cmd/vendor` directory to store external dependencies, which are
|
||||
to be compiled into etcd release binaries. `client` can be imported without
|
||||
vendoring. For full compatibility, it is recommended to vendor builds using
|
||||
etcd's vendored packages, using tools like godep, as in
|
||||
[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
|
||||
For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
|
||||
|
||||
## Error Handling
|
||||
|
||||
etcd client returns 2 types of errors:
|
||||
|
||||
1. context error: canceled or deadline exceeded.
|
||||
2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).
|
||||
|
||||
Here is the example code to handle client errors:
|
||||
|
||||
```go
|
||||
resp, err := cli.Put(ctx, "", "")
|
||||
if err != nil {
|
||||
switch err {
|
||||
case context.Canceled:
|
||||
log.Fatalf("ctx is canceled by another routine: %v", err)
|
||||
case context.DeadlineExceeded:
|
||||
log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
|
||||
case rpctypes.ErrEmptyKey:
|
||||
log.Fatalf("client-side error: %v", err)
|
||||
default:
|
||||
log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Metrics
|
||||
|
||||
The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
|
||||
|
||||
## Namespacing
|
||||
|
||||
The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
|
||||
|
||||
## Examples
|
||||
|
||||
More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
|
57
vendor/github.com/coreos/etcd/clientv3/auth.go
generated
vendored
57
vendor/github.com/coreos/etcd/clientv3/auth.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
@ -100,60 +101,65 @@ type Auth interface {
|
||||
}
|
||||
|
||||
type auth struct {
|
||||
remote pb.AuthClient
|
||||
remote pb.AuthClient
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
func NewAuth(c *Client) Auth {
|
||||
return &auth{remote: pb.NewAuthClient(c.ActiveConnection())}
|
||||
api := &auth{remote: RetryAuthClient(c)}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
|
||||
return (*AuthEnableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
|
||||
return (*AuthDisableResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
|
||||
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
|
||||
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
|
||||
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
|
||||
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
|
||||
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
|
||||
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
|
||||
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
|
||||
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
|
||||
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
|
||||
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
|
||||
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
|
||||
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
|
||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
|
||||
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
|
||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
|
||||
return (*AuthUserListResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
|
||||
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
|
||||
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
|
||||
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
|
||||
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
|
||||
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
|
||||
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
@ -163,27 +169,27 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran
|
||||
RangeEnd: []byte(rangeEnd),
|
||||
PermType: authpb.Permission_Type(permType),
|
||||
}
|
||||
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
|
||||
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
|
||||
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
|
||||
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
|
||||
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
|
||||
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
|
||||
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
|
||||
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
|
||||
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
|
||||
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
|
||||
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
|
||||
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
|
||||
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
@ -196,12 +202,13 @@ func StrToPermissionType(s string) (PermissionType, error) {
|
||||
}
|
||||
|
||||
type authenticator struct {
|
||||
conn *grpc.ClientConn // conn in-use
|
||||
remote pb.AuthClient
|
||||
conn *grpc.ClientConn // conn in-use
|
||||
remote pb.AuthClient
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
|
||||
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
|
||||
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
|
||||
return (*AuthenticateResponse)(resp), toErr(ctx, err)
|
||||
}
|
||||
|
||||
@ -209,14 +216,18 @@ func (auth *authenticator) close() {
|
||||
auth.conn.Close()
|
||||
}
|
||||
|
||||
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
|
||||
func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &authenticator{
|
||||
api := &authenticator{
|
||||
conn: conn,
|
||||
remote: pb.NewAuthClient(conn),
|
||||
}, nil
|
||||
}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api, nil
|
||||
}
|
||||
|
102
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
102
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
@ -33,6 +33,7 @@ import (
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -52,21 +53,22 @@ type Client struct {
|
||||
conn *grpc.ClientConn
|
||||
dialerrc chan error
|
||||
|
||||
cfg Config
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *simpleBalancer
|
||||
retryWrapper retryRpcFunc
|
||||
retryAuthWrapper retryRpcFunc
|
||||
cfg Config
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *healthBalancer
|
||||
mu *sync.Mutex
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// Username is a username for authentication
|
||||
// Username is a user name for authentication.
|
||||
Username string
|
||||
// Password is a password for authentication
|
||||
// Password is a password for authentication.
|
||||
Password string
|
||||
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||
tokenCred *authTokenCredential
|
||||
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
// New creates a new etcdv3 client from a given configuration.
|
||||
@ -117,8 +119,23 @@ func (c *Client) Endpoints() (eps []string) {
|
||||
|
||||
// SetEndpoints updates client's endpoints.
|
||||
func (c *Client) SetEndpoints(eps ...string) {
|
||||
c.mu.Lock()
|
||||
c.cfg.Endpoints = eps
|
||||
c.balancer.updateAddrs(eps)
|
||||
c.mu.Unlock()
|
||||
c.balancer.updateAddrs(eps...)
|
||||
|
||||
// updating notifyCh can trigger new connections,
|
||||
// need update addrs if all connections are down
|
||||
// or addrs does not include pinAddr.
|
||||
c.balancer.mu.RLock()
|
||||
update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
|
||||
c.balancer.mu.RUnlock()
|
||||
if update {
|
||||
select {
|
||||
case c.balancer.updateAddrsC <- notifyNext:
|
||||
case <-c.balancer.stopc:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
|
||||
@ -145,8 +162,10 @@ func (c *Client) autoSync() {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
case <-time.After(c.cfg.AutoSyncInterval):
|
||||
ctx, _ := context.WithTimeout(c.ctx, 5*time.Second)
|
||||
if err := c.Sync(ctx); err != nil && err != c.ctx.Err() {
|
||||
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
|
||||
err := c.Sync(ctx)
|
||||
cancel()
|
||||
if err != nil && err != c.ctx.Err() {
|
||||
logger.Println("Auto sync endpoints failed:", err)
|
||||
}
|
||||
}
|
||||
@ -175,7 +194,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
||||
host = endpoint
|
||||
url, uerr := url.Parse(endpoint)
|
||||
if uerr != nil || !strings.Contains(endpoint, "://") {
|
||||
return
|
||||
return proto, host, scheme
|
||||
}
|
||||
scheme = url.Scheme
|
||||
|
||||
@ -189,7 +208,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
|
||||
default:
|
||||
proto, host = "", ""
|
||||
}
|
||||
return
|
||||
return proto, host, scheme
|
||||
}
|
||||
|
||||
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
|
||||
@ -208,7 +227,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden
|
||||
default:
|
||||
creds = nil
|
||||
}
|
||||
return
|
||||
return creds
|
||||
}
|
||||
|
||||
// dialSetupOpts gives the dial opts prior to any authentication
|
||||
@ -218,18 +237,15 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts
|
||||
}
|
||||
if c.cfg.DialKeepAliveTime > 0 {
|
||||
params := keepalive.ClientParameters{
|
||||
Time: c.cfg.DialKeepAliveTime,
|
||||
}
|
||||
// Only relevant when KeepAliveTime is non-zero
|
||||
if c.cfg.DialKeepAliveTimeout > 0 {
|
||||
params.Timeout = c.cfg.DialKeepAliveTimeout
|
||||
Time: c.cfg.DialKeepAliveTime,
|
||||
Timeout: c.cfg.DialKeepAliveTimeout,
|
||||
}
|
||||
opts = append(opts, grpc.WithKeepaliveParams(params))
|
||||
}
|
||||
opts = append(opts, dopts...)
|
||||
|
||||
f := func(host string, t time.Duration) (net.Conn, error) {
|
||||
proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host))
|
||||
proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
|
||||
if host == "" && endpoint != "" {
|
||||
// dialing an endpoint not in the balancer; use
|
||||
// endpoint passed into dial
|
||||
@ -281,7 +297,7 @@ func (c *Client) getToken(ctx context.Context) error {
|
||||
endpoint := c.cfg.Endpoints[i]
|
||||
host := getHost(endpoint)
|
||||
// use dial options without dopts to avoid reusing the client balancer
|
||||
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@ -322,7 +338,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
|
||||
if err != nil {
|
||||
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
|
||||
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
|
||||
err = grpc.ErrClientConnTimeout
|
||||
err = context.DeadlineExceeded
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@ -371,15 +387,37 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
creds: creds,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
mu: new(sync.Mutex),
|
||||
callOpts: defaultCallOpts,
|
||||
}
|
||||
if cfg.Username != "" && cfg.Password != "" {
|
||||
client.Username = cfg.Username
|
||||
client.Password = cfg.Password
|
||||
}
|
||||
if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
|
||||
if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
|
||||
return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
|
||||
}
|
||||
callOpts := []grpc.CallOption{
|
||||
defaultFailFast,
|
||||
defaultMaxCallSendMsgSize,
|
||||
defaultMaxCallRecvMsgSize,
|
||||
}
|
||||
if cfg.MaxCallSendMsgSize > 0 {
|
||||
callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
|
||||
}
|
||||
if cfg.MaxCallRecvMsgSize > 0 {
|
||||
callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
|
||||
}
|
||||
client.callOpts = callOpts
|
||||
}
|
||||
|
||||
client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
|
||||
return grpcHealthCheck(client, ep)
|
||||
})
|
||||
|
||||
client.balancer = newSimpleBalancer(cfg.Endpoints)
|
||||
// use Endpoints[0] so that for https:// without any tls config given, then
|
||||
// grpc will assume the ServerName is in the endpoint.
|
||||
// grpc will assume the certificate server name is the endpoint host.
|
||||
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
|
||||
if err != nil {
|
||||
client.cancel()
|
||||
@ -387,21 +425,19 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
return nil, err
|
||||
}
|
||||
client.conn = conn
|
||||
client.retryWrapper = client.newRetryWrapper()
|
||||
client.retryAuthWrapper = client.newAuthRetryWrapper()
|
||||
|
||||
// wait for a connection
|
||||
if cfg.DialTimeout > 0 {
|
||||
hasConn := false
|
||||
waitc := time.After(cfg.DialTimeout)
|
||||
select {
|
||||
case <-client.balancer.readyc:
|
||||
case <-client.balancer.ready():
|
||||
hasConn = true
|
||||
case <-ctx.Done():
|
||||
case <-waitc:
|
||||
}
|
||||
if !hasConn {
|
||||
err := grpc.ErrClientConnTimeout
|
||||
err := context.DeadlineExceeded
|
||||
select {
|
||||
case err = <-client.dialerrc:
|
||||
default:
|
||||
@ -436,7 +472,7 @@ func (c *Client) checkVersion() (err error) {
|
||||
errc := make(chan error, len(c.cfg.Endpoints))
|
||||
ctx, cancel := context.WithCancel(c.ctx)
|
||||
if c.cfg.DialTimeout > 0 {
|
||||
ctx, _ = context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||
ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
|
||||
}
|
||||
wg.Add(len(c.cfg.Endpoints))
|
||||
for _, ep := range c.cfg.Endpoints {
|
||||
@ -451,7 +487,7 @@ func (c *Client) checkVersion() (err error) {
|
||||
vs := strings.Split(resp.Version, ".")
|
||||
maj, min := 0, 0
|
||||
if len(vs) >= 2 {
|
||||
maj, rerr = strconv.Atoi(vs[0])
|
||||
maj, _ = strconv.Atoi(vs[0])
|
||||
min, rerr = strconv.Atoi(vs[1])
|
||||
}
|
||||
if maj < 3 || (maj == 3 && min < 2) {
|
||||
@ -483,14 +519,14 @@ func isHaltErr(ctx context.Context, err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
code := grpc.Code(err)
|
||||
ev, _ := status.FromError(err)
|
||||
// Unavailable codes mean the system will be right back.
|
||||
// (e.g., can't connect, lost leader)
|
||||
// Treat Internal codes as if something failed, leaving the
|
||||
// system in an inconsistent state, but retrying could make progress.
|
||||
// (e.g., failed in middle of send, corrupted frame)
|
||||
// TODO: are permanent Internal errors possible from grpc?
|
||||
return code != codes.Unavailable && code != codes.Internal
|
||||
return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
|
||||
}
|
||||
|
||||
func toErr(ctx context.Context, err error) error {
|
||||
@ -501,7 +537,8 @@ func toErr(ctx context.Context, err error) error {
|
||||
if _, ok := err.(rpctypes.EtcdError); ok {
|
||||
return err
|
||||
}
|
||||
code := grpc.Code(err)
|
||||
ev, _ := status.FromError(err)
|
||||
code := ev.Code()
|
||||
switch code {
|
||||
case codes.DeadlineExceeded:
|
||||
fallthrough
|
||||
@ -510,7 +547,6 @@ func toErr(ctx context.Context, err error) error {
|
||||
err = ctx.Err()
|
||||
}
|
||||
case codes.Unavailable:
|
||||
err = ErrNoAvailableEndpoints
|
||||
case codes.FailedPrecondition:
|
||||
err = grpc.ErrClientConnClosing
|
||||
}
|
||||
|
10
vendor/github.com/coreos/etcd/clientv3/client_test.go
generated
vendored
10
vendor/github.com/coreos/etcd/clientv3/client_test.go
generated
vendored
@ -22,8 +22,8 @@ import (
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"github.com/coreos/etcd/pkg/testutil"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestDialCancel(t *testing.T) {
|
||||
@ -45,7 +45,7 @@ func TestDialCancel(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// connect to ipv4 blackhole so dial blocks
|
||||
// connect to ipv4 black hole so dial blocks
|
||||
c.SetEndpoints("http://254.0.0.1:12345")
|
||||
|
||||
// issue Get to force redial attempts
|
||||
@ -97,7 +97,7 @@ func TestDialTimeout(t *testing.T) {
|
||||
for i, cfg := range testCfgs {
|
||||
donec := make(chan error)
|
||||
go func() {
|
||||
// without timeout, dial continues forever on ipv4 blackhole
|
||||
// without timeout, dial continues forever on ipv4 black hole
|
||||
c, err := New(cfg)
|
||||
if c != nil || err == nil {
|
||||
t.Errorf("#%d: new client should fail", i)
|
||||
@ -117,8 +117,8 @@ func TestDialTimeout(t *testing.T) {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("#%d: failed to timeout dial on time", i)
|
||||
case err := <-donec:
|
||||
if err != grpc.ErrClientConnTimeout {
|
||||
t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
|
||||
if err != context.DeadlineExceeded {
|
||||
t.Errorf("#%d: unexpected error %v, want %v", i, err, context.DeadlineExceeded)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
5
vendor/github.com/coreos/etcd/clientv3/clientv3util/example_key_test.go
generated
vendored
5
vendor/github.com/coreos/etcd/clientv3/clientv3util/example_key_test.go
generated
vendored
@ -15,11 +15,12 @@
|
||||
package clientv3util_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/clientv3/clientv3util"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func ExampleKeyExists_put() {
|
||||
@ -33,7 +34,7 @@ func ExampleKeyExists_put() {
|
||||
kvc := clientv3.NewKV(cli)
|
||||
|
||||
// perform a put only if key is missing
|
||||
// It is useful to do the check (transactionally) to avoid overwriting
|
||||
// It is useful to do the check atomically to avoid overwriting
|
||||
// the existing key which would generate potentially unwanted events,
|
||||
// unless of course you wanted to do an overwrite no matter what.
|
||||
_, err = kvc.Txn(context.Background()).
|
||||
|
48
vendor/github.com/coreos/etcd/clientv3/cluster.go
generated
vendored
48
vendor/github.com/coreos/etcd/clientv3/cluster.go
generated
vendored
@ -16,6 +16,7 @@ package clientv3
|
||||
|
||||
import (
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
@ -43,20 +44,29 @@ type Cluster interface {
|
||||
}
|
||||
|
||||
type cluster struct {
|
||||
remote pb.ClusterClient
|
||||
remote pb.ClusterClient
|
||||
callOpts []grpc.CallOption
|
||||
}
|
||||
|
||||
func NewCluster(c *Client) Cluster {
|
||||
return &cluster{remote: RetryClusterClient(c)}
|
||||
api := &cluster{remote: RetryClusterClient(c)}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func NewClusterFromClusterClient(remote pb.ClusterClient) Cluster {
|
||||
return &cluster{remote: remote}
|
||||
func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
|
||||
api := &cluster{remote: remote}
|
||||
if c != nil {
|
||||
api.callOpts = c.callOpts
|
||||
}
|
||||
return api
|
||||
}
|
||||
|
||||
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
|
||||
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberAdd(ctx, r)
|
||||
resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
@ -65,7 +75,7 @@ func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAdd
|
||||
|
||||
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
|
||||
r := &pb.MemberRemoveRequest{ID: id}
|
||||
resp, err := c.remote.MemberRemove(ctx, r)
|
||||
resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
|
||||
if err != nil {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
@ -74,27 +84,19 @@ func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveRes
|
||||
|
||||
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
|
||||
// it is safe to retry on update.
|
||||
for {
|
||||
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*MemberUpdateResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
|
||||
resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
|
||||
if err == nil {
|
||||
return (*MemberUpdateResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
||||
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
|
||||
// it is safe to retry on list.
|
||||
for {
|
||||
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
|
||||
if err == nil {
|
||||
return (*MemberListResponse)(resp), nil
|
||||
}
|
||||
if isHaltErr(ctx, err) {
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
|
||||
if err == nil {
|
||||
return (*MemberListResponse)(resp), nil
|
||||
}
|
||||
return nil, toErr(ctx, err)
|
||||
}
|
||||
|
6
vendor/github.com/coreos/etcd/clientv3/compact_op.go
generated
vendored
6
vendor/github.com/coreos/etcd/clientv3/compact_op.go
generated
vendored
@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest {
|
||||
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
|
||||
}
|
||||
|
||||
// WithCompactPhysical makes compact RPC call wait until
|
||||
// the compaction is physically applied to the local database
|
||||
// such that compacted entries are totally removed from the
|
||||
// backend database.
|
||||
// WithCompactPhysical makes Compact wait until all compacted entries are
|
||||
// removed from the etcd server's storage.
|
||||
func WithCompactPhysical() CompactOption {
|
||||
return func(op *CompactOp) { op.physical = true }
|
||||
}
|
||||
|
22
vendor/github.com/coreos/etcd/clientv3/compare.go
generated
vendored
22
vendor/github.com/coreos/etcd/clientv3/compare.go
generated
vendored
@ -99,18 +99,7 @@ func (cmp *Cmp) ValueBytes() []byte {
|
||||
// WithValueBytes sets the byte slice for the comparison's value.
|
||||
func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
|
||||
|
||||
// WithRange sets the comparison to scan the range [key, end).
|
||||
func (cmp Cmp) WithRange(end string) Cmp {
|
||||
cmp.RangeEnd = []byte(end)
|
||||
return cmp
|
||||
}
|
||||
|
||||
// WithPrefix sets the comparison to scan all keys prefixed by the key.
|
||||
func (cmp Cmp) WithPrefix() Cmp {
|
||||
cmp.RangeEnd = getPrefix(cmp.Key)
|
||||
return cmp
|
||||
}
|
||||
|
||||
// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
|
||||
func mustInt64(val interface{}) int64 {
|
||||
if v, ok := val.(int64); ok {
|
||||
return v
|
||||
@ -120,3 +109,12 @@ func mustInt64(val interface{}) int64 {
|
||||
}
|
||||
panic("bad value")
|
||||
}
|
||||
|
||||
// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
|
||||
// int64 otherwise.
|
||||
func mustInt64orLeaseID(val interface{}) int64 {
|
||||
if v, ok := val.(LeaseID); ok {
|
||||
return int64(v)
|
||||
}
|
||||
return mustInt64(val)
|
||||
}
|
||||
|
9
vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
generated
vendored
9
vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
generated
vendored
@ -21,6 +21,7 @@ import (
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -185,12 +186,12 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
// only accept PUTs; a DELETE will make observe() spin
|
||||
// only accept puts; a delete will make observe() spin
|
||||
for _, ev := range wr.Events {
|
||||
if ev.Type == mvccpb.PUT {
|
||||
hdr, kv = &wr.Header, ev.Kv
|
||||
// may have multiple revs; hdr.rev = the last rev
|
||||
// set to kv's rev in case batch has multiple PUTs
|
||||
// set to kv's rev in case batch has multiple Puts
|
||||
hdr.Revision = kv.ModRevision
|
||||
break
|
||||
}
|
||||
@ -213,6 +214,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
||||
for !keyDeleted {
|
||||
wr, ok := <-wch
|
||||
if !ok {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
for _, ev := range wr.Events {
|
||||
@ -225,6 +227,7 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
|
||||
select {
|
||||
case ch <- *resp:
|
||||
case <-cctx.Done():
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -240,4 +243,4 @@ func (e *Election) Key() string { return e.leaderKey }
|
||||
func (e *Election) Rev() int64 { return e.leaderRev }
|
||||
|
||||
// Header is the response header from the last successful election proposal.
|
||||
func (m *Election) Header() *pb.ResponseHeader { return m.hdr }
|
||||
func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
|
||||
|
1
vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
generated
vendored
1
vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
|
1
vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
generated
vendored
1
vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
|
2
vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
generated
vendored
2
vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
generated
vendored
@ -18,6 +18,7 @@ import (
|
||||
"time"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -53,6 +54,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
|
||||
ctx, cancel := context.WithCancel(ops.ctx)
|
||||
keepAlive, err := client.KeepAlive(ctx, id)
|
||||
if err != nil || keepAlive == nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
6
vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
generated
vendored
6
vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
generated
vendored
@ -18,6 +18,7 @@ import (
|
||||
"math"
|
||||
|
||||
v3 "github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -193,11 +194,12 @@ func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
|
||||
}
|
||||
}
|
||||
|
||||
// first returns the store revision from the first fetch
|
||||
func (rs readSet) first() int64 {
|
||||
ret := int64(math.MaxInt64 - 1)
|
||||
for _, resp := range rs {
|
||||
if len(resp.Kvs) > 0 && resp.Kvs[0].ModRevision < ret {
|
||||
ret = resp.Kvs[0].ModRevision
|
||||
if rev := resp.Header.Revision; rev < ret {
|
||||
ret = rev
|
||||
}
|
||||
}
|
||||
return ret
|
||||
|
15
vendor/github.com/coreos/etcd/clientv3/config.go
generated
vendored
15
vendor/github.com/coreos/etcd/clientv3/config.go
generated
vendored
@ -41,10 +41,23 @@ type Config struct {
|
||||
// keep-alive probe. If the response is not received in this time, the connection is closed.
|
||||
DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
|
||||
|
||||
// MaxCallSendMsgSize is the client-side request send limit in bytes.
|
||||
// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
|
||||
// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
|
||||
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
|
||||
MaxCallSendMsgSize int
|
||||
|
||||
// MaxCallRecvMsgSize is the client-side response receive limit.
|
||||
// If 0, it defaults to "math.MaxInt32", because range response can
|
||||
// easily exceed request send limits.
|
||||
// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
|
||||
// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
|
||||
MaxCallRecvMsgSize int
|
||||
|
||||
// TLS holds the client secure credentials, if any.
|
||||
TLS *tls.Config
|
||||
|
||||
// Username is a username for authentication.
|
||||
// Username is a user name for authentication.
|
||||
Username string `json:"username"`
|
||||
|
||||
// Password is a password for authentication.
|
||||
|
2
vendor/github.com/coreos/etcd/clientv3/doc.go
generated
vendored
2
vendor/github.com/coreos/etcd/clientv3/doc.go
generated
vendored
@ -28,7 +28,7 @@
|
||||
// Make sure to close the client after using it. If the client is not closed, the
|
||||
// connection will have leaky goroutines.
|
||||
//
|
||||
// To specify client request timeout, pass context.WithTimeout to APIs:
|
||||
// To specify a client request timeout, wrap the context with context.WithTimeout:
|
||||
//
|
||||
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
|
||||
|
1
vendor/github.com/coreos/etcd/clientv3/example_cluster_test.go
generated
vendored
1
vendor/github.com/coreos/etcd/clientv3/example_cluster_test.go
generated
vendored
@ -19,6 +19,7 @@ import (
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
|
8
vendor/github.com/coreos/etcd/clientv3/example_kv_test.go
generated
vendored
8
vendor/github.com/coreos/etcd/clientv3/example_kv_test.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -236,8 +237,11 @@ func ExampleKV_txn() {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
|
||||
_, err = kvc.Txn(ctx).
|
||||
If(clientv3.Compare(clientv3.Value("key"), ">", "abc")). // txn value comparisons are lexical
|
||||
Then(clientv3.OpPut("key", "XYZ")). // this runs, since 'xyz' > 'abc'
|
||||
// txn value comparisons are lexical
|
||||
If(clientv3.Compare(clientv3.Value("key"), ">", "abc")).
|
||||
// the "Then" runs, since "xyz" > "abc"
|
||||
Then(clientv3.OpPut("key", "XYZ")).
|
||||
// the "Else" does not run
|
||||
Else(clientv3.OpPut("key", "ABC")).
|
||||
Commit()
|
||||
cancel()
|
||||
|
1
vendor/github.com/coreos/etcd/clientv3/example_lease_test.go
generated
vendored
1
vendor/github.com/coreos/etcd/clientv3/example_lease_test.go
generated
vendored
@ -19,6 +19,7 @@ import (
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
|
18
vendor/github.com/coreos/etcd/clientv3/example_maintenence_test.go
generated
vendored
18
vendor/github.com/coreos/etcd/clientv3/example_maintenence_test.go
generated
vendored
@ -18,9 +18,8 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func ExampleMaintenance_status() {
|
||||
@ -34,20 +33,15 @@ func ExampleMaintenance_status() {
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// resp, err := cli.Status(context.Background(), ep)
|
||||
//
|
||||
// or
|
||||
//
|
||||
mapi := clientv3.NewMaintenance(cli)
|
||||
resp, err := mapi.Status(context.Background(), ep)
|
||||
resp, err := cli.Status(context.Background(), ep)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("endpoint: %s / IsLeader: %v\n", ep, resp.Header.MemberId == resp.Leader)
|
||||
fmt.Printf("endpoint: %s / Leader: %v\n", ep, resp.Header.MemberId == resp.Leader)
|
||||
}
|
||||
// endpoint: localhost:2379 / IsLeader: false
|
||||
// endpoint: localhost:22379 / IsLeader: false
|
||||
// endpoint: localhost:32379 / IsLeader: true
|
||||
// endpoint: localhost:2379 / Leader: false
|
||||
// endpoint: localhost:22379 / Leader: false
|
||||
// endpoint: localhost:32379 / Leader: true
|
||||
}
|
||||
|
||||
func ExampleMaintenance_defragment() {
|
||||
|
9
vendor/github.com/coreos/etcd/clientv3/example_metrics_test.go
generated
vendored
9
vendor/github.com/coreos/etcd/clientv3/example_metrics_test.go
generated
vendored
@ -43,10 +43,10 @@ func ExampleClient_metrics() {
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
// get a key so it shows up in the metrics as a range rpc
|
||||
// get a key so it shows up in the metrics as a range RPC
|
||||
cli.Get(context.TODO(), "test_key")
|
||||
|
||||
// listen for all prometheus metrics
|
||||
// listen for all Prometheus metrics
|
||||
ln, err := net.Listen("tcp", ":0")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@ -61,7 +61,7 @@ func ExampleClient_metrics() {
|
||||
<-donec
|
||||
}()
|
||||
|
||||
// make an http request to fetch all prometheus metrics
|
||||
// make an http request to fetch all Prometheus metrics
|
||||
url := "http://" + ln.Addr().String() + "/metrics"
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
@ -80,5 +80,6 @@ func ExampleClient_metrics() {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Output: grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
|
||||
// Output:
|
||||
// grpc_client_started_total{grpc_method="Range",grpc_service="etcdserverpb.KV",grpc_type="unary"} 1
|
||||
}
|
||||
|
7
vendor/github.com/coreos/etcd/clientv3/example_test.go
generated
vendored
7
vendor/github.com/coreos/etcd/clientv3/example_test.go
generated
vendored
@ -16,12 +16,14 @@ package clientv3_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -31,8 +33,7 @@ var (
|
||||
)
|
||||
|
||||
func Example() {
|
||||
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
|
||||
clientv3.SetLogger(plog)
|
||||
clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: endpoints,
|
||||
|
1
vendor/github.com/coreos/etcd/clientv3/example_watch_test.go
generated
vendored
1
vendor/github.com/coreos/etcd/clientv3/example_watch_test.go
generated
vendored
@ -19,6 +19,7 @@ import (
|
||||
"log"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
|
46
vendor/github.com/coreos/etcd/clientv3/grpc_options.go
generated
vendored
Normal file
46
vendor/github.com/coreos/etcd/clientv3/grpc_options.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"math"

	"google.golang.org/grpc"
)

var (
	// Disable gRPC internal retrial logic
	// TODO: enable when gRPC retry is stable (FailFast=false)
	// Reference:
	//  - https://github.com/grpc/grpc-go/issues/1532
	//  - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
	defaultFailFast = grpc.FailFast(true)

	// client-side request send limit, gRPC default is math.MaxInt32
	// Make sure that "client-side send limit < server-side default send/recv limit"
	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)

	// client-side response receive limit, gRPC default is 4MB
	// Make sure that "client-side receive limit >= server-side default send/recv limit"
	// because range response can easily exceed request send limits
	// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
)

// defaultCallOpts defines a list of default "gRPC.CallOption".
// Some options are exposed to "clientv3.Config".
// Defaults will be overridden by the settings in "clientv3.Config".
var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
627 vendor/github.com/coreos/etcd/clientv3/health_balancer.go generated vendored Normal file
@ -0,0 +1,627 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package clientv3

import (
	"errors"
	"net/url"
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/status"
)
const (
	minHealthRetryDuration = 3 * time.Second
	unknownService         = "unknown service grpc.health.v1.Health"
)

// ErrNoAddrAvilable is returned by Get() when the balancer does not have
// any active connection to endpoints at the time.
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")

type healthCheckFunc func(ep string) (bool, error)

type notifyMsg int

const (
	notifyReset notifyMsg = iota
	notifyNext
)
// healthBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type healthBalancer struct {
	// addrs are the client's endpoint addresses for grpc
	addrs []grpc.Address

	// eps holds the raw endpoints from the client
	eps []string

	// notifyCh notifies grpc of the set of addresses for connecting
	notifyCh chan []grpc.Address

	// readyc closes once the first connection is up
	readyc    chan struct{}
	readyOnce sync.Once

	// healthCheck checks an endpoint's health.
	healthCheck        healthCheckFunc
	healthCheckTimeout time.Duration

	unhealthyMu        sync.RWMutex
	unhealthyHostPorts map[string]time.Time

	// mu protects all fields below.
	mu sync.RWMutex

	// upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
	upc chan struct{}

	// downc closes when grpc calls down() on pinAddr
	downc chan struct{}

	// stopc is closed to signal updateNotifyLoop should stop.
	stopc    chan struct{}
	stopOnce sync.Once
	wg       sync.WaitGroup

	// donec closes when all goroutines are exited
	donec chan struct{}

	// updateAddrsC notifies updateNotifyLoop to update addrs.
	updateAddrsC chan notifyMsg

	// grpc issues TLS cert checks using the string passed into dial so
	// that string must be the host. To recover the full scheme://host URL,
	// have a map from hosts to the original endpoint.
	hostPort2ep map[string]string

	// pinAddr is the currently pinned address; set to the empty string on
	// initialization and shutdown.
	pinAddr string

	closed bool
}
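For orientation: healthBalancer implements grpc-go's legacy Balancer interface (long since deprecated upstream in favor of the resolver/balancer APIs). Abbreviated and paraphrased from the google.golang.org/grpc release of that era:

type Balancer interface {
	// Start does any initialization work needed by the balancer.
	Start(target string, config BalancerConfig) error
	// Up informs the balancer that a connection to addr is ready;
	// gRPC later invokes the returned func as down(err) when it breaks.
	Up(addr Address) (down func(error))
	// Get returns the address to use for the next RPC.
	Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
	// Notify streams the set of addresses gRPC should hold connections to.
	Notify() <-chan []Address
	Close() error
}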
func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
	notifyCh := make(chan []grpc.Address)
	addrs := eps2addrs(eps)
	hb := &healthBalancer{
		addrs:              addrs,
		eps:                eps,
		notifyCh:           notifyCh,
		readyc:             make(chan struct{}),
		healthCheck:        hc,
		unhealthyHostPorts: make(map[string]time.Time),
		upc:                make(chan struct{}),
		stopc:              make(chan struct{}),
		downc:              make(chan struct{}),
		donec:              make(chan struct{}),
		updateAddrsC:       make(chan notifyMsg),
		hostPort2ep:        getHostPort2ep(eps),
	}
	if timeout < minHealthRetryDuration {
		timeout = minHealthRetryDuration
	}
	hb.healthCheckTimeout = timeout

	close(hb.downc)
	go hb.updateNotifyLoop()
	hb.wg.Add(1)
	go func() {
		defer hb.wg.Done()
		hb.updateUnhealthy()
	}()
	return hb
}
func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }

func (b *healthBalancer) ConnectNotify() <-chan struct{} {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.upc
}

func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }

func (b *healthBalancer) endpoint(hostPort string) string {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.hostPort2ep[hostPort]
}

func (b *healthBalancer) pinned() string {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.pinAddr
}
func (b *healthBalancer) hostPortError(hostPort string, err error) {
	if b.endpoint(hostPort) == "" {
		if logger.V(4) {
			logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
		}
		return
	}

	b.unhealthyMu.Lock()
	b.unhealthyHostPorts[hostPort] = time.Now()
	b.unhealthyMu.Unlock()
	if logger.V(4) {
		logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
	}
}

func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
	if b.endpoint(hostPort) == "" {
		if logger.V(4) {
			logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
		}
		return
	}

	b.unhealthyMu.Lock()
	delete(b.unhealthyHostPorts, hostPort)
	b.unhealthyMu.Unlock()
	if logger.V(4) {
		logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
	}
}

func (b *healthBalancer) countUnhealthy() (count int) {
	b.unhealthyMu.RLock()
	count = len(b.unhealthyHostPorts)
	b.unhealthyMu.RUnlock()
	return count
}

func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
	b.unhealthyMu.RLock()
	_, unhealthy = b.unhealthyHostPorts[hostPort]
	b.unhealthyMu.RUnlock()
	return unhealthy
}

func (b *healthBalancer) cleanupUnhealthy() {
	b.unhealthyMu.Lock()
	for k, v := range b.unhealthyHostPorts {
		if time.Since(v) > b.healthCheckTimeout {
			delete(b.unhealthyHostPorts, k)
			if logger.V(4) {
				logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
			}
		}
	}
	b.unhealthyMu.Unlock()
}
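The five helpers above implement a time-stamped gray list: an endpoint that errors is recorded with its failure time, counts as unhealthy until healthCheckTimeout elapses, and is then swept out. A standalone sketch of that pattern (hypothetical names, not part of the vendored package):

package main

import (
	"fmt"
	"sync"
	"time"
)

// grayList records recently failed endpoints with their failure time.
type grayList struct {
	mu     sync.RWMutex
	failed map[string]time.Time
	expiry time.Duration
}

func newGrayList(expiry time.Duration) *grayList {
	return &grayList{failed: make(map[string]time.Time), expiry: expiry}
}

// markFailed mirrors hostPortError: stamp the endpoint with time.Now().
func (g *grayList) markFailed(ep string) {
	g.mu.Lock()
	g.failed[ep] = time.Now()
	g.mu.Unlock()
}

// isBad mirrors isUnhealthy: a membership test only, no expiry check.
func (g *grayList) isBad(ep string) bool {
	g.mu.RLock()
	defer g.mu.RUnlock()
	_, ok := g.failed[ep]
	return ok
}

// sweep mirrors cleanupUnhealthy: drop entries older than the expiry.
func (g *grayList) sweep() {
	g.mu.Lock()
	for ep, t := range g.failed {
		if time.Since(t) > g.expiry {
			delete(g.failed, ep)
		}
	}
	g.mu.Unlock()
}

func main() {
	g := newGrayList(3 * time.Second)
	g.markFailed("10.0.0.1:2379")
	fmt.Println(g.isBad("10.0.0.1:2379")) // true until sweep() runs after the expiry
}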
func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
	unhealthyCnt := b.countUnhealthy()

	b.mu.RLock()
	defer b.mu.RUnlock()

	hbAddrs := b.addrs
	if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
		liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
		for k := range b.hostPort2ep {
			liveHostPorts[k] = struct{}{}
		}
		return hbAddrs, liveHostPorts
	}

	addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
	liveHostPorts := make(map[string]struct{}, len(addrs))
	for _, addr := range b.addrs {
		if !b.isUnhealthy(addr.Addr) {
			addrs = append(addrs, addr)
			liveHostPorts[addr.Addr] = struct{}{}
		}
	}
	return addrs, liveHostPorts
}
func (b *healthBalancer) updateUnhealthy() {
	for {
		select {
		case <-time.After(b.healthCheckTimeout):
			b.cleanupUnhealthy()
			pinned := b.pinned()
			if pinned == "" || b.isUnhealthy(pinned) {
				select {
				case b.updateAddrsC <- notifyNext:
				case <-b.stopc:
					return
				}
			}
		case <-b.stopc:
			return
		}
	}
}
func (b *healthBalancer) updateAddrs(eps ...string) {
	np := getHostPort2ep(eps)

	b.mu.Lock()
	defer b.mu.Unlock()

	match := len(np) == len(b.hostPort2ep)
	if match {
		for k, v := range np {
			if b.hostPort2ep[k] != v {
				match = false
				break
			}
		}
	}
	if match {
		// same endpoints, so no need to update address
		return
	}

	b.hostPort2ep = np
	b.addrs, b.eps = eps2addrs(eps), eps

	b.unhealthyMu.Lock()
	b.unhealthyHostPorts = make(map[string]time.Time)
	b.unhealthyMu.Unlock()
}
func (b *healthBalancer) next() {
	b.mu.RLock()
	downc := b.downc
	b.mu.RUnlock()
	select {
	case b.updateAddrsC <- notifyNext:
	case <-b.stopc:
	}
	// wait until disconnect so new RPCs are not issued on old connection
	select {
	case <-downc:
	case <-b.stopc:
	}
}
func (b *healthBalancer) updateNotifyLoop() {
	defer close(b.donec)

	for {
		b.mu.RLock()
		upc, downc, addr := b.upc, b.downc, b.pinAddr
		b.mu.RUnlock()
		// downc or upc should be closed
		select {
		case <-downc:
			downc = nil
		default:
		}
		select {
		case <-upc:
			upc = nil
		default:
		}
		switch {
		case downc == nil && upc == nil:
			// stale
			select {
			case <-b.stopc:
				return
			default:
			}
		case downc == nil:
			b.notifyAddrs(notifyReset)
			select {
			case <-upc:
			case msg := <-b.updateAddrsC:
				b.notifyAddrs(msg)
			case <-b.stopc:
				return
			}
		case upc == nil:
			select {
			// close connections that are not the pinned address
			case b.notifyCh <- []grpc.Address{{Addr: addr}}:
			case <-downc:
			case <-b.stopc:
				return
			}
			select {
			case <-downc:
				b.notifyAddrs(notifyReset)
			case msg := <-b.updateAddrsC:
				b.notifyAddrs(msg)
			case <-b.stopc:
				return
			}
		}
	}
}
func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
	if msg == notifyNext {
		select {
		case b.notifyCh <- []grpc.Address{}:
		case <-b.stopc:
			return
		}
	}
	b.mu.RLock()
	pinAddr := b.pinAddr
	downc := b.downc
	b.mu.RUnlock()
	addrs, hostPorts := b.liveAddrs()

	var waitDown bool
	if pinAddr != "" {
		_, ok := hostPorts[pinAddr]
		waitDown = !ok
	}

	select {
	case b.notifyCh <- addrs:
		if waitDown {
			select {
			case <-downc:
			case <-b.stopc:
			}
		}
	case <-b.stopc:
	}
}
func (b *healthBalancer) Up(addr grpc.Address) func(error) {
	if !b.mayPin(addr) {
		return func(err error) {}
	}

	b.mu.Lock()
	defer b.mu.Unlock()

	// gRPC might call Up after it called Close. We add this check
	// to "fix" it up at application layer. Otherwise, will panic
	// if b.upc is already closed.
	if b.closed {
		return func(err error) {}
	}

	// gRPC might call Up on a stale address.
	// Prevent updating pinAddr with a stale address.
	if !hasAddr(b.addrs, addr.Addr) {
		return func(err error) {}
	}

	if b.pinAddr != "" {
		if logger.V(4) {
			logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
		}
		return func(err error) {}
	}

	// notify waiting Get()s and pin first connected address
	close(b.upc)
	b.downc = make(chan struct{})
	b.pinAddr = addr.Addr
	if logger.V(4) {
		logger.Infof("clientv3/balancer: pin %q", addr.Addr)
	}

	// notify client that a connection is up
	b.readyOnce.Do(func() { close(b.readyc) })

	return func(err error) {
		// If connected to a black hole endpoint or a killed server, the gRPC ping
		// timeout will induce a network I/O error, and gRPC retries until success;
		// finding a healthy endpoint on retry could take several timeouts and redials.
		// To avoid wasting retries, gray-list unhealthy endpoints.
		b.hostPortError(addr.Addr, err)

		b.mu.Lock()
		b.upc = make(chan struct{})
		close(b.downc)
		b.pinAddr = ""
		b.mu.Unlock()
		if logger.V(4) {
			logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
		}
	}
}
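To make the pin/unpin life cycle concrete: under the legacy balancer contract, gRPC calls Up when a connection becomes ready and later invokes the returned closure as down(err) when it breaks. A sketch of that hand-shake (normally driven from inside grpc-go against a pinned transport; shown here for illustration only):

// b is the *healthBalancer wired into a client.
down := b.Up(grpc.Address{Addr: "10.0.0.1:2379"}) // pins the first ready address
// ... the transport to 10.0.0.1:2379 fails ...
down(errors.New("connection lost")) // unpins the address and gray-lists it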
func (b *healthBalancer) mayPin(addr grpc.Address) bool {
	if b.endpoint(addr.Addr) == "" { // stale host:port
		return false
	}

	b.unhealthyMu.RLock()
	unhealthyCnt := len(b.unhealthyHostPorts)
	failedTime, bad := b.unhealthyHostPorts[addr.Addr]
	b.unhealthyMu.RUnlock()

	b.mu.RLock()
	skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
	b.mu.RUnlock()
	if skip || !bad {
		return true
	}

	// prevent an isolated member's endpoint from being infinitely retried, as follows:
	//   1. keepalive pings detect GoAway with http2.ErrCodeEnhanceYourCalm
	//   2. balancer 'Up' unpins with grpc: failed with network I/O error
	//   3. grpc-healthcheck still SERVING, thus retry to pin
	// instead, return before grpc-healthcheck if failed within healthcheck timeout
	if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
		if logger.V(4) {
			logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
		}
		return false
	}

	if ok, _ := b.healthCheck(addr.Addr); ok {
		b.removeUnhealthy(addr.Addr, "health check success")
		return true
	}

	b.hostPortError(addr.Addr, errors.New("health check failed"))
	return false
}
func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
	var (
		addr   string
		closed bool
	)

	// If opts.BlockingWait is false (for fail-fast RPCs), it should return
	// an address it has notified via Notify immediately instead of blocking.
	if !opts.BlockingWait {
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr == "" {
			return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
		}
		return grpc.Address{Addr: addr}, func() {}, nil
	}

	for {
		b.mu.RLock()
		ch := b.upc
		b.mu.RUnlock()
		select {
		case <-ch:
		case <-b.donec:
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		case <-ctx.Done():
			return grpc.Address{Addr: ""}, nil, ctx.Err()
		}
		b.mu.RLock()
		closed = b.closed
		addr = b.pinAddr
		b.mu.RUnlock()
		// Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
		if closed {
			return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
		}
		if addr != "" {
			break
		}
	}
	return grpc.Address{Addr: addr}, func() {}, nil
}
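The BlockingWait split above is what separates fail-fast RPCs from blocking ones. A sketch of both call shapes (the caller is normally grpc-go's invoke path, not user code):

// Fail-fast: returns ErrNoAddrAvilable immediately when nothing is pinned.
addr, put, err := b.Get(ctx, grpc.BalancerGetOptions{BlockingWait: false})

// Blocking: parks on b.upc until an address is pinned, the balancer
// closes, or ctx is canceled.
addr, put, err = b.Get(ctx, grpc.BalancerGetOptions{BlockingWait: true})
if err == nil {
	defer put() // put is a no-op func() {} in this balancer
	_ = addr
}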
func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }

func (b *healthBalancer) Close() error {
	b.mu.Lock()
	// In case gRPC calls close twice. TODO: remove the checking
	// when we are sure that gRPC won't call close twice.
	if b.closed {
		b.mu.Unlock()
		<-b.donec
		return nil
	}
	b.closed = true
	b.stopOnce.Do(func() { close(b.stopc) })
	b.pinAddr = ""

	// In the case of the following scenario:
	//  1. upc is not closed; no pinned address
	//  2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
	//  3. client.conn.Close() calls balancer.Close(); closed = true
	//  4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
	// we must close upc so Get() exits from blocking on upc
	select {
	case <-b.upc:
	default:
		// terminate all waiting Get()s
		close(b.upc)
	}

	b.mu.Unlock()
	b.wg.Wait()

	// wait for updateNotifyLoop to finish
	<-b.donec
	close(b.notifyCh)

	return nil
}
func grpcHealthCheck(client *Client, ep string) (bool, error) {
	conn, err := client.dial(ep)
	if err != nil {
		return false, err
	}
	defer conn.Close()
	cli := healthpb.NewHealthClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
	cancel()
	if err != nil {
		if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
			if s.Message() == unknownService { // etcd < v3.3.0
				return true, nil
			}
		}
		return false, err
	}
	return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
}
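grpcHealthCheck is the concrete healthCheckFunc behind the balancer; the wiring in the client (client.go, not part of this hunk) is essentially a closure over the *Client, along the lines of:

hc := func(ep string) (bool, error) { return grpcHealthCheck(client, ep) }
balancer := newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, hc)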
func hasAddr(addrs []grpc.Address, targetAddr string) bool {
	for _, addr := range addrs {
		if targetAddr == addr.Addr {
			return true
		}
	}
	return false
}

func getHost(ep string) string {
	url, uerr := url.Parse(ep)
	if uerr != nil || !strings.Contains(ep, "://") {
		return ep
	}
	return url.Host
}

func eps2addrs(eps []string) []grpc.Address {
	addrs := make([]grpc.Address, len(eps))
	for i := range eps {
		addrs[i].Addr = getHost(eps[i])
	}
	return addrs
}

func getHostPort2ep(eps []string) map[string]string {
	hm := make(map[string]string, len(eps))
	for i := range eps {
		_, host, _ := parseEndpoint(eps[i])
		hm[host] = eps[i]
	}
	return hm
}
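A few worked inputs for the helpers above (the return values follow directly from the code):

getHost("http://10.0.0.1:2379") // "10.0.0.1:2379" — parsed as a URL, Host part only
getHost("10.0.0.1:2379")        // "10.0.0.1:2379" — no "://", returned unchanged
eps2addrs([]string{"http://a:2379", "b:2379"})
// -> []grpc.Address{{Addr: "a:2379"}, {Addr: "b:2379"}}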
211 vendor/github.com/coreos/etcd/clientv3/integration/black_hole_test.go generated vendored Normal file
@ -0,0 +1,211 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !cluster_proxy

package integration

import (
	"testing"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/integration"
	"github.com/coreos/etcd/pkg/testutil"

	"golang.org/x/net/context"
)
// TestBalancerUnderBlackholeKeepAliveWatch tests that when a watch discovers it cannot talk to
// a blackholed endpoint, the client balancer switches to a healthy one.
// TODO: test server-to-client keepalive ping
func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:                 2,
		GRPCKeepAliveMinTime: 1 * time.Millisecond, // avoid too_many_pings
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}

	ccfg := clientv3.Config{
		Endpoints:            []string{eps[0]},
		DialTimeout:          1 * time.Second,
		DialKeepAliveTime:    1 * time.Second,
		DialKeepAliveTimeout: 500 * time.Millisecond,
	}

	// gRPC internal implementation related.
	pingInterval := ccfg.DialKeepAliveTime + ccfg.DialKeepAliveTimeout
	// 3s for a slow machine to process the watch and reset connections
	// TODO: only send the healthy endpoint to gRPC so gRPC won't waste time
	// dialing the unhealthy endpoint.
	// then we can reduce 3s to 1s.
	timeout := pingInterval + 3*time.Second

	cli, err := clientv3.New(ccfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	wch := cli.Watch(context.Background(), "foo", clientv3.WithCreatedNotify())
	if _, ok := <-wch; !ok {
		t.Fatalf("watch failed on creation")
	}

	// endpoint can switch to eps[1] when it detects the failure of eps[0]
	cli.SetEndpoints(eps...)

	clus.Members[0].Blackhole()

	if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}
	select {
	case <-wch:
	case <-time.After(timeout):
		t.Error("took too long to receive watch events")
	}

	clus.Members[0].Unblackhole()

	// waiting for eps[0] to move out of unhealthy, so that it can be re-pinned.
	time.Sleep(ccfg.DialTimeout)

	clus.Members[1].Blackhole()

	// make sure client[0] can connect to eps[0] after removing the blackhole.
	if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil {
		t.Fatal(err)
	}
	if _, err = clus.Client(0).Put(context.TODO(), "foo", "bar1"); err != nil {
		t.Fatal(err)
	}

	select {
	case <-wch:
	case <-time.After(timeout):
		t.Error("took too long to receive watch events")
	}
}
func TestBalancerUnderBlackholeNoKeepAlivePut(t *testing.T) {
	testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Put(ctx, "foo", "bar")
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
			return errExpected
		}
		return err
	})
}

func TestBalancerUnderBlackholeNoKeepAliveDelete(t *testing.T) {
	testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Delete(ctx, "foo")
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
			return errExpected
		}
		return err
	})
}

func TestBalancerUnderBlackholeNoKeepAliveTxn(t *testing.T) {
	testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Txn(ctx).
			If(clientv3.Compare(clientv3.Version("foo"), "=", 0)).
			Then(clientv3.OpPut("foo", "bar")).
			Else(clientv3.OpPut("foo", "baz")).Commit()
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
			return errExpected
		}
		return err
	})
}

func TestBalancerUnderBlackholeNoKeepAliveLinearizableGet(t *testing.T) {
	testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "a")
		if err == context.DeadlineExceeded || err == rpctypes.ErrTimeout {
			return errExpected
		}
		return err
	})
}

func TestBalancerUnderBlackholeNoKeepAliveSerializableGet(t *testing.T) {
	testBalancerUnderBlackholeNoKeepAlive(t, func(cli *clientv3.Client, ctx context.Context) error {
		_, err := cli.Get(ctx, "a", clientv3.WithSerializable())
		if err == context.DeadlineExceeded {
			return errExpected
		}
		return err
	})
}
// testBalancerUnderBlackholeNoKeepAlive ensures that the first request to a blackholed endpoint
// fails due to context timeout, but succeeds on the next try, with an endpoint switch.
func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Client, context.Context) error) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{
		Size:               2,
		SkipCreatingClient: true,
	})
	defer clus.Terminate(t)

	eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}

	ccfg := clientv3.Config{
		Endpoints:   []string{eps[0]},
		DialTimeout: 1 * time.Second,
	}
	cli, err := clientv3.New(ccfg)
	if err != nil {
		t.Fatal(err)
	}
	defer cli.Close()

	// wait for eps[0] to be pinned
	mustWaitPinReady(t, cli)

	// add all eps to the list, so that when the original pinned one fails
	// the client can switch to other available eps
	cli.SetEndpoints(eps...)

	// blackhole eps[0]
	clus.Members[0].Blackhole()

	// fail first due to blackhole, retry should succeed
	// TODO: first operation can succeed
	// when gRPC supports better retry on non-delivered request
	for i := 0; i < 2; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		err = op(cli, ctx)
		cancel()
		if err == nil {
			break
		}
		if i == 0 {
			if err != errExpected {
				t.Errorf("#%d: expected %v, got %v", i, errExpected, err)
			}
		} else if err != nil {
			t.Errorf("#%d: failed with error %v", i, err)
		}
	}
}
Some files were not shown because too many files have changed in this diff.