Mirror of https://gitee.com/milvus-io/milvus.git (synced 2024-11-29 18:38:44 +08:00)
Use go mod instead of GOPATH and add more cgo interfaces
Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
parent 5a57b62f0c
commit bb9c906ef6
@@ -20,6 +20,10 @@ public:
        return schema_;
    }

    std::string& get_collection_name() {
        return collection_name_;
    }

 private:
    // TODO: add Index ptr
    // IndexPtr index_ = nullptr;
@@ -13,6 +13,10 @@ public:
        return schema_;
    }

    std::string& get_partition_name() {
        return partition_name_;
    }

 private:
    std::string partition_name_;
    SchemaPtr schema_;
@@ -89,7 +89,7 @@ class SegmentBase {
    void set_time_end(Timestamp time_end) {
        this->time_end_ = time_end;
    }
    uint64_t get_segment_id(uint64_t segment_id) {
    uint64_t get_segment_id() {
        return segment_id_;
    }
    uint64_t set_segment_id(uint64_t segment_id) {
@@ -8,6 +8,10 @@ NewCollection(const char* collection_name, const char* schema_conf) {

    auto collection = std::make_unique<milvus::dog_segment::Collection>(name, conf);

    // TODO: delete print
    std::cout << "create collection " << collection_name << std::endl;
    return (void*)collection.release();
}

@@ -15,5 +17,7 @@ void
DeleteCollection(CCollection collection) {
    auto col = (milvus::dog_segment::Collection*)collection;

    // TODO: delete print
    std::cout << "delete collection " << col->get_collection_name() << std::endl;
    delete col;
}
@@ -12,11 +12,15 @@ NewPartition(CCollection collection, const char* partition_name) {

    auto partition = std::make_unique<milvus::dog_segment::Partition>(name, schema);

    // TODO: delete print
    std::cout << "create partition " << name << std::endl;
    return (void*)partition.release();
}

void DeletePartition(CPartition partition) {
    auto p = (milvus::dog_segment::Partition*)partition;

    // TODO: delete print
    std::cout << "delete partition " << p->get_partition_name() << std::endl;
    delete p;
}
@@ -10,12 +10,16 @@ NewSegment(CPartition partition, unsigned long segment_id) {

    segment->set_segment_id(segment_id);

    // TODO: delete print
    std::cout << "create segment " << segment_id << std::endl;
    return (void*)segment.release();
}

void DeleteSegment(CSegmentBase segment) {
    auto s = (milvus::dog_segment::SegmentBase*)segment;

    // TODO: delete print
    std::cout << "delete segment " << s->get_segment_id() << std::endl;
    delete s;
}
@@ -2,14 +2,47 @@
#include <string>
#include <random>
#include <gtest/gtest.h>
#include <dog_segment/SegmentBase.h>

#include "dog_segment/segment_c.h"
#include "dog_segment/collection_c.h"

TEST(SegmentTest, InsertTest) {
    auto fake_schema = std::make_shared<milvus::dog_segment::Schema>();
    auto s = milvus::dog_segment::CreateSegment(fake_schema).release();
TEST(CApiTest, CollectionTest) {
    auto collection_name = "collection0";
    auto schema_tmp_conf = "null_schema";
    auto collection = NewCollection(collection_name, schema_tmp_conf);
    DeleteCollection(collection);
}

TEST(CApiTest, PartitonTest) {
    auto collection_name = "collection0";
    auto schema_tmp_conf = "null_schema";
    auto collection = NewCollection(collection_name, schema_tmp_conf);
    auto partition_name = "partition0";
    auto partition = NewPartition(collection, partition_name);
    DeleteCollection(collection);
    DeletePartition(partition);
}

TEST(CApiTest, SegmentTest) {
    auto collection_name = "collection0";
    auto schema_tmp_conf = "null_schema";
    auto collection = NewCollection(collection_name, schema_tmp_conf);
    auto partition_name = "partition0";
    auto partition = NewPartition(collection, partition_name);
    auto segment = NewSegment(partition, 0);
    DeleteCollection(collection);
    DeletePartition(partition);
    DeleteSegment(segment);
}


TEST(CApiTest, InsertTest) {
    auto collection_name = "collection0";
    auto schema_tmp_conf = "null_schema";
    auto collection = NewCollection(collection_name, schema_tmp_conf);
    auto partition_name = "partition0";
    auto partition = NewPartition(collection, partition_name);
    auto segment = NewSegment(partition, 0);

    std::vector<char> raw_data;
    std::vector<uint64_t> timestamps;

@@ -31,7 +64,11 @@ TEST(SegmentTest, InsertTest) {

    auto line_sizeof = (sizeof(int) + sizeof(float) * 16);

    auto res = Insert(s, N, uids.data(), timestamps.data(), raw_data.data(), (int)line_sizeof, N);
    auto res = Insert(segment, N, uids.data(), timestamps.data(), raw_data.data(), (int)line_sizeof, N);

    std::cout << res << std::endl;
}
    assert(res == 0);

    DeleteCollection(collection);
    DeletePartition(partition);
    DeleteSegment(segment);
}
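The tests above exercise the new C interfaces directly from C++; the same functions are what a Go caller would reach through cgo, per the commit title. Below is a minimal, hypothetical sketch of such a wrapper. Only the function names (NewCollection, DeleteCollection) and the header names come from the diff above; the package name, the #cgo flags, and the assumption that CCollection is a plain void* typedef (suggested by the (void*) casts in collection_c.cpp) are not confirmed by this commit.

// Package dogsegment is a hypothetical Go wrapper around the C interfaces
// added in this commit.
package dogsegment

/*
// The include and library paths below are placeholders; the real #cgo flags
// depend on how the C++ core is built and installed.
#cgo CFLAGS: -I../core/include
#cgo LDFLAGS: -L../core/lib -lmilvus_dog_segment
#include <stdlib.h>
#include "dog_segment/collection_c.h"
*/
import "C"
import "unsafe"

// Collection owns a CCollection handle returned by the C-side NewCollection.
type Collection struct {
	ptr unsafe.Pointer
}

// NewCollection converts the Go strings to C strings and calls the C-side
// NewCollection; schemaConf mirrors the "null_schema" placeholder used by the
// tests above.
func NewCollection(name, schemaConf string) Collection {
	cName := C.CString(name)
	cConf := C.CString(schemaConf)
	defer C.free(unsafe.Pointer(cName))
	defer C.free(unsafe.Pointer(cConf))
	return Collection{ptr: unsafe.Pointer(C.NewCollection(cName, cConf))}
}

// Delete releases the collection through the C-side DeleteCollection.
func (c Collection) Delete() {
	C.DeleteCollection(c.ptr)
}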
errors/errors.go (new file, 239 lines)
@@ -0,0 +1,239 @@
package errors

import (
	"fmt"
	"io"
)

// New returns an error with the supplied message.
// New also records the stack trace at the point it was called.
func New(message string) error {
	return &fundamental{
		msg:   message,
		stack: callers(),
	}
}

// Errorf formats according to a format specifier and returns the string
// as a value that satisfies error.
// Errorf also records the stack trace at the point it was called.
func Errorf(format string, args ...interface{}) error {
	return &fundamental{
		msg:   fmt.Sprintf(format, args...),
		stack: callers(),
	}
}

// StackTraceAware is an optimization to avoid repetitive traversals of an error chain.
// HasStack checks for this marker first.
// Annotate/Wrap and Annotatef/Wrapf will produce this marker.
type StackTraceAware interface {
	HasStack() bool
}

// HasStack tells whether a StackTracer exists in the error chain
func HasStack(err error) bool {
	if errWithStack, ok := err.(StackTraceAware); ok {
		return errWithStack.HasStack()
	}
	return GetStackTracer(err) != nil
}

// fundamental is an error that has a message and a stack, but no caller.
type fundamental struct {
	msg string
	*stack
}

func (f *fundamental) Error() string { return f.msg }

func (f *fundamental) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			io.WriteString(s, f.msg)
			f.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, f.msg)
	case 'q':
		fmt.Fprintf(s, "%q", f.msg)
	}
}

// WithStack annotates err with a stack trace at the point WithStack was called.
// If err is nil, WithStack returns nil.
//
// For most use cases this is deprecated and AddStack should be used (which will ensure just one stack trace).
// However, one may want to use this in some situations, for example to create a 2nd trace across a goroutine.
func WithStack(err error) error {
	if err == nil {
		return nil
	}

	return &withStack{
		err,
		callers(),
	}
}

// AddStack is similar to WithStack.
// However, it will first check with HasStack to see if a stack trace already exists in the causer chain before creating another one.
func AddStack(err error) error {
	if HasStack(err) {
		return err
	}
	return WithStack(err)
}

type withStack struct {
	error
	*stack
}

func (w *withStack) Cause() error { return w.error }

func (w *withStack) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", w.Cause())
			w.stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, w.Error())
	case 'q':
		fmt.Fprintf(s, "%q", w.Error())
	}
}

// Wrap returns an error annotating err with a stack trace
// at the point Wrap is called, and the supplied message.
// If err is nil, Wrap returns nil.
//
// For most use cases this is deprecated in favor of Annotate.
// Annotate avoids creating duplicate stack traces.
func Wrap(err error, message string) error {
	if err == nil {
		return nil
	}
	hasStack := HasStack(err)
	err = &withMessage{
		cause:         err,
		msg:           message,
		causeHasStack: hasStack,
	}
	return &withStack{
		err,
		callers(),
	}
}

// Wrapf returns an error annotating err with a stack trace
// at the point Wrapf is call, and the format specifier.
// If err is nil, Wrapf returns nil.
//
// For most use cases this is deprecated in favor of Annotatef.
// Annotatef avoids creating duplicate stack traces.
func Wrapf(err error, format string, args ...interface{}) error {
	if err == nil {
		return nil
	}
	hasStack := HasStack(err)
	err = &withMessage{
		cause:         err,
		msg:           fmt.Sprintf(format, args...),
		causeHasStack: hasStack,
	}
	return &withStack{
		err,
		callers(),
	}
}

// WithMessage annotates err with a new message.
// If err is nil, WithMessage returns nil.
func WithMessage(err error, message string) error {
	if err == nil {
		return nil
	}
	return &withMessage{
		cause:         err,
		msg:           message,
		causeHasStack: HasStack(err),
	}
}

type withMessage struct {
	cause         error
	msg           string
	causeHasStack bool
}

func (w *withMessage) Error() string  { return w.msg + ": " + w.cause.Error() }
func (w *withMessage) Cause() error   { return w.cause }
func (w *withMessage) HasStack() bool { return w.causeHasStack }

func (w *withMessage) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v\n", w.Cause())
			io.WriteString(s, w.msg)
			return
		}
		fallthrough
	case 's', 'q':
		io.WriteString(s, w.Error())
	}
}

// Cause returns the underlying cause of the error, if possible.
// An error value has a cause if it implements the following
// interface:
//
//     type causer interface {
//            Cause() error
//     }
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further
// investigation.
func Cause(err error) error {
	cause := Unwrap(err)
	if cause == nil {
		return err
	}
	return Cause(cause)
}

// Unwrap uses causer to return the next error in the chain or nil.
// This goes one-level deeper, whereas Cause goes as far as possible
func Unwrap(err error) error {
	type causer interface {
		Cause() error
	}
	if unErr, ok := err.(causer); ok {
		return unErr.Cause()
	}
	return nil
}

// Find an error in the chain that matches a test function.
// returns nil if no error is found.
func Find(origErr error, test func(error) bool) error {
	var foundErr error
	WalkDeep(origErr, func(err error) bool {
		if test(err) {
			foundErr = err
			return true
		}
		return false
	})
	return foundErr
}
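The exported helpers above compose in the usual pkg/errors style: New/Errorf record a stack, Wrap/WithMessage layer messages on a cause, and Cause/HasStack inspect the chain. A brief usage sketch follows; the import path is inferred from the go.mod module path later in this diff and the loadSegment function is hypothetical.

package main

import (
	"fmt"

	// Import path assumed from the go.mod module path shown later in this commit.
	"github.com/czs007/suvlim/errors"
)

func loadSegment(id uint64) error {
	// New records a stack trace at this call site.
	return errors.New("segment not found")
}

func main() {
	err := errors.Wrap(loadSegment(42), "query failed")

	fmt.Println(err)                  // "query failed: segment not found"
	fmt.Println(errors.Cause(err))    // innermost error: "segment not found"
	fmt.Println(errors.HasStack(err)) // true: a StackTracer exists in the chain
	fmt.Printf("%+v\n", err)          // messages plus the recorded stack traces
}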
errors/group.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package errors

// ErrorGroup is an interface for multiple errors that are not a chain.
// This happens for example when executing multiple operations in parallel.
type ErrorGroup interface {
	Errors() []error
}

// Errors uses the ErrorGroup interface to return a slice of errors.
// If the ErrorGroup interface is not implemented it returns an array containing just the given error.
func Errors(err error) []error {
	if eg, ok := err.(ErrorGroup); ok {
		return eg.Errors()
	}
	return []error{err}
}

// WalkDeep does a depth-first traversal of all errors.
// Any ErrorGroup is traversed (after going deep).
// The visitor function can return true to end the traversal early
// In that case, WalkDeep will return true, otherwise false.
func WalkDeep(err error, visitor func(err error) bool) bool {
	// Go deep
	unErr := err
	for unErr != nil {
		if done := visitor(unErr); done {
			return true
		}
		unErr = Unwrap(unErr)
	}

	// Go wide
	if group, ok := err.(ErrorGroup); ok {
		for _, err := range group.Errors() {
			if early := WalkDeep(err, visitor); early {
				return true
			}
		}
	}

	return false
}
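Any type exposing Errors() []error participates in this traversal, which is how Find (from errors.go above) can locate a specific failure among results gathered from parallel operations. A short sketch, assuming the same import path as before; multiError and the error strings are made up for illustration.

package main

import (
	"fmt"
	"strings"

	// Import path assumed from the go.mod module path in this commit.
	"github.com/czs007/suvlim/errors"
)

// multiError is a hypothetical ErrorGroup collecting failures from
// operations that ran in parallel.
type multiError struct{ errs []error }

func (m *multiError) Error() string   { return fmt.Sprintf("%d operations failed", len(m.errs)) }
func (m *multiError) Errors() []error { return m.errs }

func main() {
	group := &multiError{errs: []error{
		errors.New("etcd: request timeout"),
		errors.New("pulsar: connection refused"),
	}}

	// Find walks the chain depth-first, descending into ErrorGroup members,
	// and returns the first error the predicate accepts.
	timeout := errors.Find(group, func(e error) bool {
		return strings.Contains(e.Error(), "timeout")
	})
	fmt.Println(timeout) // "etcd: request timeout"
}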
errors/stack.go (new file, 226 lines)
@@ -0,0 +1,226 @@
package errors

import (
	"bytes"
	"fmt"
	"io"
	"path"
	"runtime"
	"strconv"
	"strings"
)

// StackTracer retrieves the StackTrace
// Generally you would want to use the GetStackTracer function to do that.
type StackTracer interface {
	StackTrace() StackTrace
}

// GetStackTracer will return the first StackTracer in the causer chain.
// This function is used by AddStack to avoid creating redundant stack traces.
//
// You can also use the StackTracer interface on the returned error to get the stack trace.
func GetStackTracer(origErr error) StackTracer {
	var stacked StackTracer
	WalkDeep(origErr, func(err error) bool {
		if stackTracer, ok := err.(StackTracer); ok {
			stacked = stackTracer
			return true
		}
		return false
	})
	return stacked
}

// Frame represents a program counter inside a stack frame.
type Frame uintptr

// pc returns the program counter for this frame;
// multiple frames may have the same PC value.
func (f Frame) pc() uintptr { return uintptr(f) - 1 }

// file returns the full path to the file that contains the
// function for this Frame's pc.
func (f Frame) file() string {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return "unknown"
	}
	file, _ := fn.FileLine(f.pc())
	return file
}

// line returns the line number of source code of the
// function for this Frame's pc.
func (f Frame) line() int {
	fn := runtime.FuncForPC(f.pc())
	if fn == nil {
		return 0
	}
	_, line := fn.FileLine(f.pc())
	return line
}

// Format formats the frame according to the fmt.Formatter interface.
//
//    %s    source file
//    %d    source line
//    %n    function name
//    %v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+s   function name and path of source file relative to the compile time
//          GOPATH separated by \n\t (<funcname>\n\t<path>)
//    %+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
	f.format(s, s, verb)
}

// format allows stack trace printing calls to be made with a bytes.Buffer.
func (f Frame) format(w io.Writer, s fmt.State, verb rune) {
	switch verb {
	case 's':
		switch {
		case s.Flag('+'):
			pc := f.pc()
			fn := runtime.FuncForPC(pc)
			if fn == nil {
				io.WriteString(w, "unknown")
			} else {
				file, _ := fn.FileLine(pc)
				io.WriteString(w, fn.Name())
				io.WriteString(w, "\n\t")
				io.WriteString(w, file)
			}
		default:
			io.WriteString(w, path.Base(f.file()))
		}
	case 'd':
		io.WriteString(w, strconv.Itoa(f.line()))
	case 'n':
		name := runtime.FuncForPC(f.pc()).Name()
		io.WriteString(w, funcname(name))
	case 'v':
		f.format(w, s, 's')
		io.WriteString(w, ":")
		f.format(w, s, 'd')
	}
}

// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame

// Format formats the stack of Frames according to the fmt.Formatter interface.
//
//    %s    lists source files for each Frame in the stack
//    %v    lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//    %+v   Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
	var b bytes.Buffer
	switch verb {
	case 'v':
		switch {
		case s.Flag('+'):
			b.Grow(len(st) * stackMinLen)
			for _, fr := range st {
				b.WriteByte('\n')
				fr.format(&b, s, verb)
			}
		case s.Flag('#'):
			fmt.Fprintf(&b, "%#v", []Frame(st))
		default:
			st.formatSlice(&b, s, verb)
		}
	case 's':
		st.formatSlice(&b, s, verb)
	}
	io.Copy(s, &b)
}

// formatSlice will format this StackTrace into the given buffer as a slice of
// Frame, only valid when called with '%s' or '%v'.
func (st StackTrace) formatSlice(b *bytes.Buffer, s fmt.State, verb rune) {
	b.WriteByte('[')
	if len(st) == 0 {
		b.WriteByte(']')
		return
	}

	b.Grow(len(st) * (stackMinLen / 4))
	st[0].format(b, s, verb)
	for _, fr := range st[1:] {
		b.WriteByte(' ')
		fr.format(b, s, verb)
	}
	b.WriteByte(']')
}

// stackMinLen is a best-guess at the minimum length of a stack trace. It
// doesn't need to be exact, just give a good enough head start for the buffer
// to avoid the expensive early growth.
const stackMinLen = 96

// stack represents a stack of program counters.
type stack []uintptr

func (s *stack) Format(st fmt.State, verb rune) {
	switch verb {
	case 'v':
		switch {
		case st.Flag('+'):
			var b bytes.Buffer
			b.Grow(len(*s) * stackMinLen)
			for _, pc := range *s {
				f := Frame(pc)
				b.WriteByte('\n')
				f.format(&b, st, 'v')
			}
			io.Copy(st, &b)
		}
	}
}

func (s *stack) StackTrace() StackTrace {
	f := make([]Frame, len(*s))
	for i := 0; i < len(f); i++ {
		f[i] = Frame((*s)[i])
	}
	return f
}

func callers() *stack {
	return callersSkip(4)
}

func callersSkip(skip int) *stack {
	const depth = 32
	var pcs [depth]uintptr
	n := runtime.Callers(skip, pcs[:])
	var st stack = pcs[0:n]
	return &st
}

// funcname removes the path prefix component of a function's name reported by func.Name().
func funcname(name string) string {
	i := strings.LastIndex(name, "/")
	name = name[i+1:]
	i = strings.Index(name, ".")
	return name[i+1:]
}

// NewStack is for library implementers that want to generate a stack trace.
// Normally you should insted use AddStack to get an error with a stack trace.
//
// The result of this function can be turned into a stack trace by calling .StackTrace()
//
// This function takes an argument for the number of stack frames to skip.
// This avoids putting stack generation function calls like this one in the stack trace.
// A value of 0 will give you the line that called NewStack(0)
// A library author wrapping this in their own function will want to use a value of at least 1.
func NewStack(skip int) StackTracer {
	return callersSkip(skip + 3)
}
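NewStack can also be used on its own to capture a caller location without constructing an error, and the Frame/StackTrace formatters above control how it prints. A minimal sketch, again assuming the module import path from go.mod; the exact output depends on the build.

package main

import (
	"fmt"

	// Import path assumed from the go.mod module path in this commit.
	"github.com/czs007/suvlim/errors"
)

func main() {
	// Skip 0: the reported frame is the line that called NewStack(0).
	st := errors.NewStack(0).StackTrace()
	if len(st) > 0 {
		// %+v on a Frame prints "<function>\n\t<file>:<line>".
		fmt.Printf("captured at:%+v\n", st[0])
	}
	// %v on the whole StackTrace prints a compact "[file:line file:line ...]" slice.
	fmt.Printf("%v\n", st)
}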
go.mod (26 changed lines)
@@ -1,20 +1,32 @@
module github.com/czs007/suvlim

go 1.14
go 1.15

require (
	code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48 // indirect
	github.com/onsi/ginkgo v1.14.0 // indirect
	github.com/BurntSushi/toml v0.3.1
	github.com/apache/pulsar/pulsar-client-go v0.0.0-20200901051823-800681aaa9af
	github.com/coreos/etcd v3.3.25+incompatible // indirect
	github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect
	github.com/gogo/protobuf v1.3.1 // indirect
	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
	github.com/docker/go-units v0.4.0
	github.com/gogo/protobuf v1.3.1
	github.com/golang/protobuf v1.4.2
	github.com/google/btree v1.0.0
	github.com/minio/minio-go/v7 v7.0.5
	github.com/onsi/ginkgo v1.14.0 // indirect
	github.com/opentracing/opentracing-go v1.2.0
	github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712
	github.com/pingcap/errors v0.11.4
	github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463
	github.com/pivotal-golang/bytefmt v0.0.0-20200131002437-cf55d5288a48
	github.com/sirupsen/logrus v1.6.0
	github.com/stretchr/testify v1.6.1
	github.com/tikv/client-go v0.0.0-20200723074018-095b94dc2430
	github.com/tikv/client-go v0.0.0-20200824032810-95774393107b
	github.com/tikv/pd v2.1.19+incompatible
	go.etcd.io/etcd v3.3.25+incompatible
	google.golang.org/grpc v1.23.1
	go.uber.org/zap v1.15.0
	golang.org/x/net v0.0.0-20200822124328-c89045814202
	google.golang.org/grpc v1.31.1
	google.golang.org/grpc/examples v0.0.0-20200828165940-d8ef479ab79a // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.0.0
	sigs.k8s.io/yaml v1.2.0 // indirect
)
go.sum (176 changed lines)
(Machine-generated checksum entries updated in step with the go.mod changes above: pulsar-client-go, etcd, tikv/client-go, tikv/pd, pingcap, protobuf, grpc, zap, minio-go, and their transitive modules.)
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@ -449,10 +383,8 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
@ -473,21 +405,16 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190508220229-2d0786266e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -501,21 +428,17 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 h1:AvbQYmiaaaza3cW3QXRyPo5kYgpFIzOAfeAAN7m3qQ4=
|
||||
golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@ -540,8 +463,9 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
|
||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310CI4WGPCNPyrLbE7WZA8Y=
|
||||
golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
@ -597,7 +521,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f h1:FU37niK8AQ59mHcskRyQL7H0ErSeNh650vdcj8HqdSI=
|
||||
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
@ -623,7 +546,6 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
@ -631,39 +553,27 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28=
|
||||
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
|
||||
google.golang.org/grpc v1.25.0/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs=
|
||||
google.golang.org/grpc/examples v0.0.0-20200827205515-d25c71b54334 h1:B2p/7/WEaKkn0pzU35mBDEf0tVeRqevH41hDGWBVBcY=
|
||||
google.golang.org/grpc/examples v0.0.0-20200827205515-d25c71b54334/go.mod h1:Lh55/1hxmVHEkOvSIQ2uj0P12QyOCUNyRwnUlSS13hw=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc/examples v0.0.0-20200828165940-d8ef479ab79a h1:7iRJyssym7732TmOPsstcu7CtG53rDKSiL+yhxbN+5Y=
|
||||
google.golang.org/grpc/examples v0.0.0-20200828165940-d8ef479ab79a/go.mod h1:Lh55/1hxmVHEkOvSIQ2uj0P12QyOCUNyRwnUlSS13hw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.0/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.20.1/go.mod h1:KqelGeouBkcbcuB3HCk4/YH2tmNLk6YSWA5LIWeI/lY=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
@ -675,8 +585,8 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
@ -690,10 +600,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@ -705,10 +613,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
istio.io/gogo-genproto v0.0.0-20190731221249-06e20ada0df2/go.mod h1:IjvrbUlRbbw4JCpsgvgihcz9USUwEoNTL/uwMtyV5yk=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
230
master/client/base_client.go
Normal file
@ -0,0 +1,230 @@
|
||||
// Copyright 2019 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"github.com/czs007/suvlim/pkg/pdpb"
|
||||
"github.com/czs007/suvlim/util/grpcutil"
|
||||
"github.com/pingcap/log"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// baseClient is the basic client underlying all other, more complex clients.
|
||||
type baseClient struct {
|
||||
urls []string
|
||||
clusterID uint64
|
||||
connMu struct {
|
||||
sync.RWMutex
|
||||
clientConns map[string]*grpc.ClientConn
|
||||
leader string
|
||||
}
|
||||
|
||||
checkLeaderCh chan struct{}
|
||||
|
||||
wg sync.WaitGroup
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
security SecurityOption
|
||||
|
||||
gRPCDialOptions []grpc.DialOption
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// SecurityOption records TLS-related options.
|
||||
type SecurityOption struct {
|
||||
CAPath string
|
||||
CertPath string
|
||||
KeyPath string
|
||||
}
|
||||
|
||||
// ClientOption configures client.
|
||||
type ClientOption func(c *baseClient)
|
||||
|
||||
// WithGRPCDialOptions configures the client with gRPC dial options.
|
||||
func WithGRPCDialOptions(opts ...grpc.DialOption) ClientOption {
|
||||
return func(c *baseClient) {
|
||||
c.gRPCDialOptions = append(c.gRPCDialOptions, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// WithCustomTimeoutOption configures the client with timeout option.
|
||||
func WithCustomTimeoutOption(timeout time.Duration) ClientOption {
|
||||
return func(c *baseClient) {
|
||||
c.timeout = timeout
|
||||
}
|
||||
}
|
||||
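// Illustrative sketch (not part of this file): the two helpers above follow the
// functional-options pattern, so callers compose them when constructing a client.
// The endpoint and timeout values below are assumptions for illustration only.
//
//    cli, err := NewClient(
//        []string{"127.0.0.1:2379"},
//        SecurityOption{},
//        WithCustomTimeoutOption(5*time.Second),
//        WithGRPCDialOptions(grpc.WithBlock()),
//    )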
|
||||
// newBaseClient returns a new baseClient.
|
||||
func newBaseClient(ctx context.Context, urls []string, security SecurityOption, opts ...ClientOption) (*baseClient, error) {
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
c := &baseClient{
|
||||
urls: urls,
|
||||
checkLeaderCh: make(chan struct{}, 1),
|
||||
ctx: ctx1,
|
||||
cancel: cancel,
|
||||
security: security,
|
||||
timeout: defaultPDTimeout,
|
||||
}
|
||||
c.connMu.clientConns = make(map[string]*grpc.ClientConn)
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
|
||||
log.Info("[pd] init cluster id", zap.Uint64("cluster-id", c.clusterID))
|
||||
|
||||
c.wg.Add(1)
|
||||
go c.leaderLoop()
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *baseClient) initRetry(f func() error) error {
|
||||
var err error
|
||||
for i := 0; i < maxInitClusterRetries; i++ {
|
||||
if err = f(); err == nil {
|
||||
return nil
|
||||
}
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return err
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
}
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
func (c *baseClient) leaderLoop() {
|
||||
defer c.wg.Done()
|
||||
|
||||
ctx, cancel := context.WithCancel(c.ctx)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.checkLeaderCh:
|
||||
case <-time.After(time.Minute):
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ScheduleCheckLeader schedules a leader check.
|
||||
func (c *baseClient) ScheduleCheckLeader() {
|
||||
select {
|
||||
case c.checkLeaderCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
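// The non-blocking send above coalesces repeated triggers: if a leader check is
// already pending, the extra signal is simply dropped. The same idiom in isolation
// (a sketch with illustrative names):
//
//    notify := make(chan struct{}, 1)
//    select {
//    case notify <- struct{}{}: // queue a check if none is pending
//    default: // a check is already queued; do nothing
//    }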
|
||||
// GetClusterID returns the ClusterID.
|
||||
func (c *baseClient) GetClusterID(context.Context) uint64 {
|
||||
return c.clusterID
|
||||
}
|
||||
|
||||
// GetLeaderAddr returns the leader address.
|
||||
// For testing use.
|
||||
func (c *baseClient) GetLeaderAddr() string {
|
||||
c.connMu.RLock()
|
||||
defer c.connMu.RUnlock()
|
||||
return c.connMu.leader
|
||||
}
|
||||
|
||||
// GetURLs returns the URLs.
|
||||
// For testing use. It should only be called when the client is closed.
|
||||
func (c *baseClient) GetURLs() []string {
|
||||
return c.urls
|
||||
}
|
||||
|
||||
|
||||
func (c *baseClient) updateURLs(members []*pdpb.Member) {
|
||||
urls := make([]string, 0, len(members))
|
||||
for _, m := range members {
|
||||
urls = append(urls, m.GetClientUrls()...)
|
||||
}
|
||||
|
||||
sort.Strings(urls)
|
||||
// The URL list is unchanged; nothing to update.
|
||||
if reflect.DeepEqual(c.urls, urls) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("[pd] update member urls", zap.Strings("old-urls", c.urls), zap.Strings("new-urls", urls))
|
||||
c.urls = urls
|
||||
}
|
||||
|
||||
func (c *baseClient) switchLeader(addrs []string) error {
|
||||
// FIXME: How to safely compare leader URLs? For now, only the first client URL is used.
|
||||
addr := addrs[0]
|
||||
|
||||
c.connMu.RLock()
|
||||
oldLeader := c.connMu.leader
|
||||
c.connMu.RUnlock()
|
||||
|
||||
if addr == oldLeader {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info("[pd] switch leader", zap.String("new-leader", addr), zap.String("old-leader", oldLeader))
|
||||
if _, err := c.getOrCreateGRPCConn(addr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
c.connMu.leader = addr
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *baseClient) getOrCreateGRPCConn(addr string) (*grpc.ClientConn, error) {
|
||||
c.connMu.RLock()
|
||||
conn, ok := c.connMu.clientConns[addr]
|
||||
c.connMu.RUnlock()
|
||||
if ok {
|
||||
return conn, nil
|
||||
}
|
||||
tlsCfg, err := grpcutil.SecurityConfig{
|
||||
CAPath: c.security.CAPath,
|
||||
CertPath: c.security.CertPath,
|
||||
KeyPath: c.security.KeyPath,
|
||||
}.ToTLSConfig()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
dctx, cancel := context.WithTimeout(c.ctx, dialTimeout)
|
||||
defer cancel()
|
||||
cc, err := grpcutil.GetClientConn(dctx, addr, tlsCfg, c.gRPCDialOptions...)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
if old, ok := c.connMu.clientConns[addr]; ok {
|
||||
cc.Close()
|
||||
log.Debug("use old connection", zap.String("target", cc.Target()), zap.String("state", cc.GetState().String()))
|
||||
return old, nil
|
||||
}
|
||||
|
||||
c.connMu.clientConns[addr] = cc
|
||||
return cc, nil
|
||||
}
|
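// getOrCreateGRPCConn above uses a read-lock fast path plus a write-lock slow path
// so that concurrent callers end up sharing one *grpc.ClientConn per address. A
// minimal standalone sketch of the same caching idiom (names and dial options are
// illustrative assumptions, not part of this package):
//
//    var mu sync.RWMutex
//    var conns = map[string]*grpc.ClientConn{}
//
//    func getConn(addr string) (*grpc.ClientConn, error) {
//        mu.RLock()
//        if cc, ok := conns[addr]; ok {
//            mu.RUnlock()
//            return cc, nil
//        }
//        mu.RUnlock()
//        cc, err := grpc.Dial(addr, grpc.WithInsecure())
//        if err != nil {
//            return nil, err
//        }
//        mu.Lock()
//        defer mu.Unlock()
//        if old, ok := conns[addr]; ok {
//            cc.Close() // another goroutine won the race; reuse its connection
//            return old, nil
//        }
//        conns[addr] = cc
//        return cc, nil
//    }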
405
master/client/client.go
Normal file
@ -0,0 +1,405 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"github.com/czs007/suvlim/pkg/pdpb"
|
||||
"github.com/pingcap/log"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
|
||||
// Client is a PD (Placement Driver) client.
|
||||
// It should not be used after calling Close().
|
||||
type Client interface {
|
||||
// GetClusterID gets the cluster ID from PD.
|
||||
GetClusterID(ctx context.Context) uint64
|
||||
// GetMemberInfo gets the members Info from PD
|
||||
//GetMemberInfo(ctx context.Context) ([]*pdpb.Member, error)
|
||||
// GetLeaderAddr returns the current leader's address. It returns "" before
// the leader has been synced from the server.
|
||||
GetLeaderAddr() string
|
||||
// GetTS gets a timestamp from PD.
|
||||
GetTS(ctx context.Context) (int64, int64, error)
|
||||
// GetTSAsync gets a timestamp from PD without blocking the caller.
|
||||
GetTSAsync(ctx context.Context) TSFuture
|
||||
Close()
|
||||
}
|
||||
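// A minimal sketch of how a caller is expected to drive this interface; the
// endpoint below is an illustrative assumption and error handling is elided:
//
//    cli, err := NewClient([]string{"127.0.0.1:2379"}, SecurityOption{})
//    defer cli.Close()
//    physical, logical, err := cli.GetTS(context.Background())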
|
||||
type tsoRequest struct {
|
||||
start time.Time
|
||||
ctx context.Context
|
||||
done chan error
|
||||
physical int64
|
||||
logical int64
|
||||
}
|
||||
|
||||
const (
|
||||
defaultPDTimeout = 3 * time.Second
|
||||
dialTimeout = 3 * time.Second
|
||||
updateLeaderTimeout = time.Second // Use a shorter timeout to recover faster from network isolation.
|
||||
maxMergeTSORequests = 10000 // should be higher if the client sends requests in bursts
|
||||
maxInitClusterRetries = 100
|
||||
)
|
||||
|
||||
var (
|
||||
// errFailInitClusterID is returned when the cluster ID cannot be loaded from any of the supplied PD addresses.
|
||||
errFailInitClusterID = errors.New("[pd] failed to get cluster id")
|
||||
// errClosing is returned when a request is canceled because the client is closing.
|
||||
errClosing = errors.New("[pd] closing")
|
||||
// errTSOLength is returned when the number of timestamps in the response is inconsistent with the request.
|
||||
errTSOLength = errors.New("[pd] tso length in rpc response is incorrect")
|
||||
)
|
||||
|
||||
type client struct {
|
||||
*baseClient
|
||||
tsoRequests chan *tsoRequest
|
||||
|
||||
lastPhysical int64
|
||||
lastLogical int64
|
||||
|
||||
tsDeadlineCh chan deadline
|
||||
}
|
||||
|
||||
// NewClient creates a PD client.
|
||||
func NewClient(pdAddrs []string, security SecurityOption, opts ...ClientOption) (Client, error) {
|
||||
return NewClientWithContext(context.Background(), pdAddrs, security, opts...)
|
||||
}
|
||||
|
||||
// NewClientWithContext creates a PD client with context.
|
||||
func NewClientWithContext(ctx context.Context, pdAddrs []string, security SecurityOption, opts ...ClientOption) (Client, error) {
|
||||
log.Info("[pd] create pd client with endpoints", zap.Strings("pd-address", pdAddrs))
|
||||
base, err := newBaseClient(ctx, addrsToUrls(pdAddrs), security, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &client{
|
||||
baseClient: base,
|
||||
tsoRequests: make(chan *tsoRequest, maxMergeTSORequests),
|
||||
tsDeadlineCh: make(chan deadline, 1),
|
||||
}
|
||||
|
||||
c.wg.Add(2)
|
||||
go c.tsLoop()
|
||||
go c.tsCancelLoop()
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
type deadline struct {
|
||||
timer <-chan time.Time
|
||||
done chan struct{}
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (c *client) tsCancelLoop() {
|
||||
defer c.wg.Done()
|
||||
|
||||
ctx, cancel := context.WithCancel(c.ctx)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case d := <-c.tsDeadlineCh:
|
||||
select {
|
||||
case <-d.timer:
|
||||
log.Error("tso request is canceled due to timeout")
|
||||
d.cancel()
|
||||
case <-d.done:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) checkStreamTimeout(loopCtx context.Context, cancel context.CancelFunc, createdCh chan struct{}) {
|
||||
select {
|
||||
case <-time.After(c.timeout):
|
||||
cancel()
|
||||
case <-createdCh:
|
||||
return
|
||||
case <-loopCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
func (c *client) tsLoop() {
|
||||
defer c.wg.Done()
|
||||
|
||||
loopCtx, loopCancel := context.WithCancel(c.ctx)
|
||||
defer loopCancel()
|
||||
|
||||
defaultSize := maxMergeTSORequests + 1
|
||||
requests := make([]*tsoRequest, defaultSize)
|
||||
createdCh := make(chan struct{})
|
||||
|
||||
var opts []opentracing.StartSpanOption
|
||||
var stream pdpb.PD_TsoClient
|
||||
var cancel context.CancelFunc
|
||||
|
||||
for {
|
||||
var err error
|
||||
|
||||
if stream == nil {
|
||||
var ctx context.Context
|
||||
ctx, cancel = context.WithCancel(loopCtx)
|
||||
go c.checkStreamTimeout(loopCtx, cancel, createdCh)
|
||||
stream, err = c.leaderClient().Tso(ctx)
|
||||
if stream != nil {
|
||||
createdCh <- struct{}{}
|
||||
}
|
||||
if err != nil {
|
||||
select {
|
||||
case <-loopCtx.Done():
|
||||
cancel()
|
||||
return
|
||||
default:
|
||||
}
|
||||
log.Error("[pd] create tso stream error")
|
||||
c.ScheduleCheckLeader()
|
||||
cancel()
|
||||
c.revokeTSORequest(errors.WithStack(err))
|
||||
select {
|
||||
case <-time.After(time.Second):
|
||||
case <-loopCtx.Done():
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case first := <-c.tsoRequests:
|
||||
pendingPlus1 := len(c.tsoRequests) + 1
|
||||
requests[0] = first
|
||||
for i := 1; i < pendingPlus1; i++ {
|
||||
requests[i] = <-c.tsoRequests
|
||||
}
|
||||
done := make(chan struct{})
|
||||
dl := deadline{
|
||||
timer: time.After(c.timeout),
|
||||
done: done,
|
||||
cancel: cancel,
|
||||
}
|
||||
select {
|
||||
case c.tsDeadlineCh <- dl:
|
||||
case <-loopCtx.Done():
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
opts = extractSpanReference(requests[:pendingPlus1], opts[:0])
|
||||
err = c.processTSORequests(stream, requests[:pendingPlus1], opts)
|
||||
close(done)
|
||||
case <-loopCtx.Done():
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
select {
|
||||
case <-loopCtx.Done():
|
||||
cancel()
|
||||
return
|
||||
default:
|
||||
}
|
||||
log.Error("[pd] getTS error")
|
||||
c.ScheduleCheckLeader()
|
||||
cancel()
|
||||
stream, cancel = nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func extractSpanReference(requests []*tsoRequest, opts []opentracing.StartSpanOption) []opentracing.StartSpanOption {
|
||||
for _, req := range requests {
|
||||
if span := opentracing.SpanFromContext(req.ctx); span != nil {
|
||||
opts = append(opts, opentracing.ChildOf(span.Context()))
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func (c *client) processTSORequests(stream pdpb.PD_TsoClient, requests []*tsoRequest, opts []opentracing.StartSpanOption) error {
|
||||
if len(opts) > 0 {
|
||||
span := opentracing.StartSpan("pdclient.processTSORequests", opts...)
|
||||
defer span.Finish()
|
||||
}
|
||||
count := len(requests)
|
||||
//start := time.Now()
|
||||
req := &pdpb.TsoRequest{
|
||||
Header: c.requestHeader(),
|
||||
Count: uint32(count),
|
||||
}
|
||||
|
||||
if err := stream.Send(req); err != nil {
|
||||
err = errors.WithStack(err)
|
||||
c.finishTSORequest(requests, 0, 0, err)
|
||||
return err
|
||||
}
|
||||
resp, err := stream.Recv()
|
||||
if err != nil {
|
||||
err = errors.WithStack(err)
|
||||
c.finishTSORequest(requests, 0, 0, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.GetCount() != uint32(len(requests)) {
|
||||
err = errors.WithStack(errTSOLength)
|
||||
c.finishTSORequest(requests, 0, 0, err)
|
||||
return err
|
||||
}
|
||||
|
||||
physical, logical := resp.GetTimestamp().GetPhysical(), resp.GetTimestamp().GetLogical()
|
||||
// The server returns the highest ts of the batch; rebase to the first logical.
|
||||
logical -= int64(resp.GetCount() - 1)
|
||||
if tsLessEqual(physical, logical, c.lastPhysical, c.lastLogical) {
|
||||
panic(errors.Errorf("timestamp fallback, newly acquired ts (%d, %d) is less or equal to last one (%d, %d)",
physical, logical, c.lastPhysical, c.lastLogical))
|
||||
}
|
||||
c.lastPhysical = physical
|
||||
c.lastLogical = logical + int64(len(requests)) - 1
|
||||
c.finishTSORequest(requests, physical, logical, nil)
|
||||
return nil
|
||||
}
|
||||
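// Worked example of the allocation above (illustrative numbers): if the server
// answers a batch of count = 3 with (physical = P, logical = L), the code rebases
// the first logical to L-2, finishTSORequest hands out (P, L-2), (P, L-1), (P, L),
// and lastLogical advances to (L-2) + 3 - 1 = L.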
|
||||
func tsLessEqual(physical, logical, thatPhysical, thatLogical int64) bool {
|
||||
if physical == thatPhysical {
|
||||
return logical <= thatLogical
|
||||
}
|
||||
return physical < thatPhysical
|
||||
}
|
||||
|
||||
func (c *client) finishTSORequest(requests []*tsoRequest, physical, firstLogical int64, err error) {
|
||||
for i := 0; i < len(requests); i++ {
|
||||
if span := opentracing.SpanFromContext(requests[i].ctx); span != nil {
|
||||
span.Finish()
|
||||
}
|
||||
requests[i].physical, requests[i].logical = physical, firstLogical+int64(i)
|
||||
requests[i].done <- err
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) revokeTSORequest(err error) {
|
||||
n := len(c.tsoRequests)
|
||||
for i := 0; i < n; i++ {
|
||||
req := <-c.tsoRequests
|
||||
req.done <- err
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) Close() {
|
||||
c.cancel()
|
||||
c.wg.Wait()
|
||||
|
||||
c.revokeTSORequest(errors.WithStack(errClosing))
|
||||
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
for _, cc := range c.connMu.clientConns {
|
||||
if err := cc.Close(); err != nil {
|
||||
log.Error("[pd] failed to close gRPC clientConn")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// leaderClient gets the client of current PD leader.
|
||||
func (c *client) leaderClient() pdpb.PDClient {
|
||||
c.connMu.RLock()
|
||||
defer c.connMu.RUnlock()
|
||||
|
||||
return pdpb.NewPDClient(c.connMu.clientConns[c.connMu.leader])
|
||||
}
|
||||
|
||||
var tsoReqPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &tsoRequest{
|
||||
done: make(chan error, 1),
|
||||
physical: 0,
|
||||
logical: 0,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func (c *client) GetTSAsync(ctx context.Context) TSFuture {
|
||||
if span := opentracing.SpanFromContext(ctx); span != nil {
|
||||
span = opentracing.StartSpan("GetTSAsync", opentracing.ChildOf(span.Context()))
|
||||
ctx = opentracing.ContextWithSpan(ctx, span)
|
||||
}
|
||||
req := tsoReqPool.Get().(*tsoRequest)
|
||||
req.ctx = ctx
|
||||
req.start = time.Now()
|
||||
c.tsoRequests <- req
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
// TSFuture is a future which promises to return a TSO.
|
||||
type TSFuture interface {
|
||||
// Wait gets the physical and logical time, it would block caller if data is not available yet.
|
||||
Wait() (int64, int64, error)
|
||||
}
|
||||
|
||||
func (req *tsoRequest) Wait() (physical int64, logical int64, err error) {
|
||||
// If the observed tso command duration is very high, the reason could be that
// it takes too long for Wait() to be called.
|
||||
select {
|
||||
case err = <-req.done:
|
||||
err = errors.WithStack(err)
|
||||
defer tsoReqPool.Put(req)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
physical, logical = req.physical, req.logical
|
||||
return
|
||||
case <-req.ctx.Done():
|
||||
return 0, 0, errors.WithStack(req.ctx.Err())
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) GetTS(ctx context.Context) (physical int64, logical int64, err error) {
|
||||
resp := c.GetTSAsync(ctx)
|
||||
return resp.Wait()
|
||||
}
|
||||
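// GetTSAsync lets callers pipeline several TSO requests so that tsLoop can merge
// them into a single RPC; a minimal sketch (illustrative only, error handling elided):
//
//    futures := make([]TSFuture, 0, 8)
//    for i := 0; i < 8; i++ {
//        futures = append(futures, cli.GetTSAsync(ctx))
//    }
//    for _, f := range futures {
//        physical, logical, err := f.Wait()
//        // use physical/logical, handle err
//    }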
|
||||
|
||||
|
||||
func (c *client) requestHeader() *pdpb.RequestHeader {
|
||||
return &pdpb.RequestHeader{
|
||||
ClusterId: c.clusterID,
|
||||
}
|
||||
}
|
||||
|
||||
func addrsToUrls(addrs []string) []string {
|
||||
// Add the default scheme "http://" to addrs.
|
||||
urls := make([]string, 0, len(addrs))
|
||||
for _, addr := range addrs {
|
||||
if strings.Contains(addr, "://") {
|
||||
urls = append(urls, addr)
|
||||
} else {
|
||||
urls = append(urls, "http://"+addr)
|
||||
}
|
||||
}
|
||||
return urls
|
||||
}
|
537
master/config/config.go
Normal file
@ -0,0 +1,537 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/czs007/suvlim/util/grpcutil"
|
||||
//"google.golang.org/grpc"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"github.com/czs007/suvlim/util/typeutil"
|
||||
"github.com/pingcap/log"
|
||||
"go.etcd.io/etcd/embed"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// Config is the pd server configuration.
|
||||
type Config struct {
|
||||
flagSet *flag.FlagSet
|
||||
Version bool `json:"-"`
|
||||
ConfigCheck bool `json:"-"`
|
||||
|
||||
ClientUrls string `toml:"client-urls" json:"client-urls"`
|
||||
PeerUrls string `toml:"peer-urls" json:"peer-urls"`
|
||||
AdvertiseClientUrls string `toml:"advertise-client-urls" json:"advertise-client-urls"`
|
||||
AdvertisePeerUrls string `toml:"advertise-peer-urls" json:"advertise-peer-urls"`
|
||||
|
||||
|
||||
Name string `toml:"name" json:"name"`
|
||||
DataDir string `toml:"data-dir" json:"data-dir"`
|
||||
EnableGRPCGateway bool `json:"enable-grpc-gateway"`
|
||||
|
||||
InitialCluster string `toml:"initial-cluster" json:"initial-cluster"`
|
||||
InitialClusterState string `toml:"initial-cluster-state" json:"initial-cluster-state"`
|
||||
InitialClusterToken string `toml:"initial-cluster-token" json:"initial-cluster-token"`
|
||||
|
||||
|
||||
LeaderLease int64 `toml:"lease" json:"lease"`
|
||||
Log log.Config `toml:"log" json:"log"`
|
||||
LogFileDeprecated string `toml:"log-file" json:"log-file,omitempty"`
|
||||
LogLevelDeprecated string `toml:"log-level" json:"log-level,omitempty"`
|
||||
|
||||
PDServerCfg PDServerConfig `toml:"pd-server" json:"pd-server"`
|
||||
|
||||
TickInterval typeutil.Duration `toml:"tick-interval"`
|
||||
ElectionInterval typeutil.Duration `toml:"election-interval"`
|
||||
DisableStrictReconfigCheck bool
|
||||
|
||||
// TsoSaveInterval is the interval to save timestamp.
|
||||
TsoSaveInterval typeutil.Duration `toml:"tso-save-interval" json:"tso-save-interval"`
|
||||
|
||||
PreVote bool `toml:"enable-prevote"`
|
||||
Security grpcutil.SecurityConfig `toml:"security" json:"security"`
|
||||
|
||||
configFile string
|
||||
|
||||
// For all warnings during parsing.
|
||||
WarningMsgs []string
|
||||
|
||||
HeartbeatStreamBindInterval typeutil.Duration
|
||||
|
||||
logger *zap.Logger
|
||||
logProps *log.ZapProperties
|
||||
|
||||
//ServiceRegister func(*grpc.Server) `json:"-"`
|
||||
}
|
||||
|
||||
// NewConfig creates a new config.
|
||||
func NewConfig() *Config {
|
||||
cfg := &Config{}
|
||||
cfg.flagSet = flag.NewFlagSet("pd", flag.ContinueOnError)
|
||||
fs := cfg.flagSet
|
||||
|
||||
fs.BoolVar(&cfg.Version, "V", false, "print version information and exit")
|
||||
fs.BoolVar(&cfg.Version, "version", false, "print version information and exit")
|
||||
fs.StringVar(&cfg.configFile, "config", "", "config file")
|
||||
fs.BoolVar(&cfg.ConfigCheck, "config-check", false, "check config file validity and exit")
|
||||
|
||||
fs.StringVar(&cfg.Name, "name", "", "human-readable name for this pd member")
|
||||
|
||||
fs.StringVar(&cfg.DataDir, "data-dir", "", "path to the data directory (default 'default.${name}')")
|
||||
fs.StringVar(&cfg.ClientUrls, "client-urls", defaultClientUrls, "url for client traffic")
|
||||
fs.StringVar(&cfg.AdvertiseClientUrls, "advertise-client-urls", "", "advertise url for client traffic (default '${client-urls}')")
|
||||
fs.StringVar(&cfg.PeerUrls, "peer-urls", defaultPeerUrls, "url for peer traffic")
|
||||
fs.StringVar(&cfg.AdvertisePeerUrls, "advertise-peer-urls", "", "advertise url for peer traffic (default '${peer-urls}')")
|
||||
fs.StringVar(&cfg.InitialCluster, "initial-cluster", "", "initial cluster configuration for bootstrapping, e.g. pd=http://127.0.0.1:2380")
|
||||
|
||||
fs.StringVar(&cfg.Log.Level, "L", "", "log level: debug, info, warn, error, fatal (default 'info')")
|
||||
fs.StringVar(&cfg.Log.File.Filename, "log-file", "", "log file path")
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
|
||||
const (
|
||||
defaultLeaderLease = int64(3)
|
||||
defaultName = "pd"
|
||||
defaultClientUrls = "http://127.0.0.1:2379"
|
||||
defaultPeerUrls = "http://127.0.0.1:2380"
|
||||
defaultInitialClusterState = embed.ClusterStateFlagNew
|
||||
defaultInitialClusterToken = "pd-cluster"
|
||||
|
||||
// etcd uses 100ms for the heartbeat and 1s for the election timeout.
// We enlarge both a little to reduce network pressure.
// For now embedded etcd uses TickMs for the heartbeat; we will update this
// once embedded etcd decouples tick and heartbeat.
|
||||
defaultTickInterval = 500 * time.Millisecond
|
||||
// embedded etcd has a check that `5 * tick > election`
|
||||
defaultElectionInterval = 3000 * time.Millisecond
|
||||
|
||||
defaultHeartbeatStreamRebindInterval = time.Minute
|
||||
|
||||
defaultMaxResetTSGap = 24 * time.Hour
|
||||
defaultEnableGRPCGateway = true
|
||||
defaultDisableErrorVerbose = true
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
}
|
||||
|
||||
func adjustString(v *string, defValue string) {
|
||||
if len(*v) == 0 {
|
||||
*v = defValue
|
||||
}
|
||||
}
|
||||
|
||||
func adjustUint64(v *uint64, defValue uint64) {
|
||||
if *v == 0 {
|
||||
*v = defValue
|
||||
}
|
||||
}
|
||||
|
||||
func adjustInt64(v *int64, defValue int64) {
|
||||
if *v == 0 {
|
||||
*v = defValue
|
||||
}
|
||||
}
|
||||
|
||||
func adjustFloat64(v *float64, defValue float64) {
|
||||
if *v == 0 {
|
||||
*v = defValue
|
||||
}
|
||||
}
|
||||
|
||||
func adjustDuration(v *typeutil.Duration, defValue time.Duration) {
|
||||
if v.Duration == 0 {
|
||||
v.Duration = defValue
|
||||
}
|
||||
}
|
||||
|
||||
func adjustPath(p *string) {
|
||||
absPath, err := filepath.Abs(*p)
|
||||
if err == nil {
|
||||
*p = absPath
|
||||
}
|
||||
}
|
||||
|
||||
// Parse parses flag definitions from the argument list.
|
||||
func (c *Config) Parse(arguments []string) error {
|
||||
// Parse first to get config file.
|
||||
err := c.flagSet.Parse(arguments)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Load config file if specified.
|
||||
var meta *toml.MetaData
|
||||
if c.configFile != "" {
|
||||
meta, err = c.configFromFile(c.configFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Backward compatibility for toml config
|
||||
if c.LogFileDeprecated != "" {
|
||||
msg := fmt.Sprintf("log-file in %s is deprecated, use [log.file] instead", c.configFile)
|
||||
c.WarningMsgs = append(c.WarningMsgs, msg)
|
||||
if c.Log.File.Filename == "" {
|
||||
c.Log.File.Filename = c.LogFileDeprecated
|
||||
}
|
||||
}
|
||||
if c.LogLevelDeprecated != "" {
|
||||
msg := fmt.Sprintf("log-level in %s is deprecated, use [log] instead", c.configFile)
|
||||
c.WarningMsgs = append(c.WarningMsgs, msg)
|
||||
if c.Log.Level == "" {
|
||||
c.Log.Level = c.LogLevelDeprecated
|
||||
}
|
||||
}
|
||||
if meta.IsDefined("schedule", "disable-raft-learner") {
|
||||
msg := fmt.Sprintf("disable-raft-learner in %s is deprecated", c.configFile)
|
||||
c.WarningMsgs = append(c.WarningMsgs, msg)
|
||||
}
|
||||
if meta.IsDefined("dashboard", "disable-telemetry") {
|
||||
msg := fmt.Sprintf("disable-telemetry in %s is deprecated, use enable-telemetry instead", c.configFile)
|
||||
c.WarningMsgs = append(c.WarningMsgs, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Parse again to replace with command line options.
|
||||
err = c.flagSet.Parse(arguments)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if len(c.flagSet.Args()) != 0 {
|
||||
return errors.Errorf("'%s' is an invalid flag", c.flagSet.Arg(0))
|
||||
}
|
||||
|
||||
err = c.Adjust(meta)
|
||||
return err
|
||||
}
|
||||
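// Typical wiring in a server main, as a sketch (assumes flags come from os.Args;
// error handling elided):
//
//    cfg := NewConfig()
//    if err := cfg.Parse(os.Args[1:]); err != nil {
//        // handle parse error
//    }
//    if err := cfg.SetupLogger(); err != nil {
//        // handle logger error
//    }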
|
||||
// Validate is used to validate if some configurations are right.
|
||||
func (c *Config) Validate() error {
|
||||
dataDir, err := filepath.Abs(c.DataDir)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
logFile, err := filepath.Abs(c.Log.File.Filename)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
rel, err := filepath.Rel(dataDir, filepath.Dir(logFile))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if !strings.HasPrefix(rel, "..") {
|
||||
return errors.New("log directory shouldn't be the subdirectory of data directory")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// configMetaData is a utility to test whether a configuration item is defined.
|
||||
type configMetaData struct {
|
||||
meta *toml.MetaData
|
||||
path []string
|
||||
}
|
||||
|
||||
func newConfigMetadata(meta *toml.MetaData) *configMetaData {
|
||||
return &configMetaData{meta: meta}
|
||||
}
|
||||
|
||||
func (m *configMetaData) IsDefined(key string) bool {
|
||||
if m.meta == nil {
|
||||
return false
|
||||
}
|
||||
keys := append([]string(nil), m.path...)
|
||||
keys = append(keys, key)
|
||||
return m.meta.IsDefined(keys...)
|
||||
}
|
||||
|
||||
func (m *configMetaData) Child(path ...string) *configMetaData {
|
||||
newPath := append([]string(nil), m.path...)
|
||||
newPath = append(newPath, path...)
|
||||
return &configMetaData{
|
||||
meta: m.meta,
|
||||
path: newPath,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *configMetaData) CheckUndecoded() error {
|
||||
if m.meta == nil {
|
||||
return nil
|
||||
}
|
||||
undecoded := m.meta.Undecoded()
|
||||
if len(undecoded) == 0 {
|
||||
return nil
|
||||
}
|
||||
errInfo := "Config contains undefined item: "
|
||||
for _, key := range undecoded {
|
||||
errInfo += key.String() + ", "
|
||||
}
|
||||
return errors.New(errInfo[:len(errInfo)-2])
|
||||
}
|
||||
|
||||
// Adjust is used to adjust the PD configurations.
|
||||
func (c *Config) Adjust(meta *toml.MetaData) error {
|
||||
configMetaData := newConfigMetadata(meta)
|
||||
if err := configMetaData.CheckUndecoded(); err != nil {
|
||||
c.WarningMsgs = append(c.WarningMsgs, err.Error())
|
||||
}
|
||||
|
||||
if c.Name == "" {
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
adjustString(&c.Name, fmt.Sprintf("%s-%s", defaultName, hostname))
|
||||
}
|
||||
adjustString(&c.DataDir, fmt.Sprintf("default.%s", c.Name))
|
||||
adjustPath(&c.DataDir)
|
||||
|
||||
if err := c.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
adjustString(&c.ClientUrls, defaultClientUrls)
|
||||
adjustString(&c.AdvertiseClientUrls, c.ClientUrls)
|
||||
adjustString(&c.PeerUrls, defaultPeerUrls)
|
||||
adjustString(&c.AdvertisePeerUrls, c.PeerUrls)
|
||||
|
||||
if len(c.InitialCluster) == 0 {
|
||||
// The advertise peer urls may be http://127.0.0.1:2380,http://127.0.0.1:2381
|
||||
// so the initial cluster is pd=http://127.0.0.1:2380,pd=http://127.0.0.1:2381
|
||||
items := strings.Split(c.AdvertisePeerUrls, ",")
|
||||
|
||||
sep := ""
|
||||
for _, item := range items {
|
||||
c.InitialCluster += fmt.Sprintf("%s%s=%s", sep, c.Name, item)
|
||||
sep = ","
|
||||
}
|
||||
}
|
||||
|
||||
adjustString(&c.InitialClusterState, defaultInitialClusterState)
|
||||
adjustString(&c.InitialClusterToken, defaultInitialClusterToken)
|
||||
|
||||
adjustInt64(&c.LeaderLease, defaultLeaderLease)
|
||||
|
||||
adjustDuration(&c.TsoSaveInterval, time.Duration(defaultLeaderLease)*time.Second)
|
||||
|
||||
adjustDuration(&c.TickInterval, defaultTickInterval)
|
||||
adjustDuration(&c.ElectionInterval, defaultElectionInterval)
|
||||
|
||||
|
||||
if err := c.PDServerCfg.adjust(configMetaData.Child("pd-server")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.adjustLog(configMetaData.Child("log"))
|
||||
adjustDuration(&c.HeartbeatStreamBindInterval, defaultHeartbeatStreamRebindInterval)
|
||||
|
||||
if !configMetaData.IsDefined("enable-prevote") {
|
||||
c.PreVote = true
|
||||
}
|
||||
if !configMetaData.IsDefined("enable-grpc-gateway") {
|
||||
c.EnableGRPCGateway = defaultEnableGRPCGateway
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) adjustLog(meta *configMetaData) {
|
||||
if !meta.IsDefined("disable-error-verbose") {
|
||||
c.Log.DisableErrorVerbose = defaultDisableErrorVerbose
|
||||
}
|
||||
}
|
||||
|
||||
// Clone returns a cloned configuration.
|
||||
func (c *Config) Clone() *Config {
|
||||
cfg := &Config{}
|
||||
*cfg = *c
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (c *Config) String() string {
|
||||
data, err := json.MarshalIndent(c, "", " ")
|
||||
if err != nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return string(data)
|
||||
}
|
||||
|
||||
// configFromFile loads config from file.
|
||||
func (c *Config) configFromFile(path string) (*toml.MetaData, error) {
|
||||
meta, err := toml.DecodeFile(path, c)
|
||||
return &meta, errors.WithStack(err)
|
||||
}
|
||||
|
||||
|
||||
// PDServerConfig is the configuration for pd server.
|
||||
type PDServerConfig struct {
|
||||
// MaxResetTSGap is the max gap to reset the tso.
|
||||
MaxResetTSGap typeutil.Duration `toml:"max-gap-reset-ts" json:"max-gap-reset-ts"`
|
||||
}
|
||||
|
||||
func (c *PDServerConfig) adjust(meta *configMetaData) error {
|
||||
adjustDuration(&c.MaxResetTSGap, defaultMaxResetTSGap)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clone returns a cloned PD server config.
|
||||
func (c *PDServerConfig) Clone() *PDServerConfig {
|
||||
return &PDServerConfig{
|
||||
MaxResetTSGap: c.MaxResetTSGap,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// ParseUrls parses a comma-separated string into multiple URLs.
// Exported for the API.
|
||||
func ParseUrls(s string) ([]url.URL, error) {
|
||||
items := strings.Split(s, ",")
|
||||
urls := make([]url.URL, 0, len(items))
|
||||
for _, item := range items {
|
||||
u, err := url.Parse(item)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
urls = append(urls, *u)
|
||||
}
|
||||
|
||||
return urls, nil
|
||||
}
|
||||
|
||||
// SetupLogger sets up the logger.
|
||||
func (c *Config) SetupLogger() error {
|
||||
lg, p, err := log.InitLogger(&c.Log, zap.AddStacktrace(zapcore.FatalLevel))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.logger = lg
|
||||
c.logProps = p
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetZapLogger gets the created zap logger.
|
||||
func (c *Config) GetZapLogger() *zap.Logger {
|
||||
return c.logger
|
||||
}
|
||||
|
||||
// GetZapLogProperties gets properties of the zap logger.
|
||||
func (c *Config) GetZapLogProperties() *log.ZapProperties {
|
||||
return c.logProps
|
||||
}
|
||||
|
||||
// GetConfigFile gets the config file.
|
||||
func (c *Config) GetConfigFile() string {
|
||||
return c.configFile
|
||||
}
|
||||
|
||||
// RewriteFile rewrites the config file after updating the config.
|
||||
func (c *Config) RewriteFile(new *Config) error {
|
||||
filePath := c.GetConfigFile()
|
||||
if filePath == "" {
|
||||
return nil
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := toml.NewEncoder(&buf).Encode(*new); err != nil {
|
||||
return err
|
||||
}
|
||||
dir := filepath.Dir(filePath)
|
||||
tmpfile := filepath.Join(dir, "tmp_pd.toml")
|
||||
|
||||
f, err := os.Create(tmpfile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err := f.Write(buf.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(tmpfile, filePath)
|
||||
}
|
||||
|
||||
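// GenEmbedEtcdConfig generates a configuration for the embedded etcd server from the PD config.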
func (c *Config) GenEmbedEtcdConfig() (*embed.Config, error) {
|
||||
cfg := embed.NewConfig()
|
||||
cfg.Name = c.Name
|
||||
cfg.Dir = c.DataDir
|
||||
cfg.WalDir = ""
|
||||
cfg.InitialCluster = c.InitialCluster
|
||||
cfg.ClusterState = c.InitialClusterState
|
||||
cfg.InitialClusterToken = c.InitialClusterToken
|
||||
cfg.EnablePprof = true
|
||||
cfg.PreVote = c.PreVote
|
||||
cfg.StrictReconfigCheck = !c.DisableStrictReconfigCheck
|
||||
cfg.TickMs = uint(c.TickInterval.Duration / time.Millisecond)
|
||||
cfg.ElectionMs = uint(c.ElectionInterval.Duration / time.Millisecond)
|
||||
|
||||
allowedCN, serr := c.Security.GetOneAllowedCN()
|
||||
if serr != nil {
|
||||
return nil, serr
|
||||
}
|
||||
cfg.ClientTLSInfo.ClientCertAuth = len(c.Security.CAPath) != 0
|
||||
cfg.ClientTLSInfo.TrustedCAFile = c.Security.CAPath
|
||||
cfg.ClientTLSInfo.CertFile = c.Security.CertPath
|
||||
cfg.ClientTLSInfo.KeyFile = c.Security.KeyPath
|
||||
// Client no need to set the CN. (cfg.ClientTLSInfo.AllowedCN = allowedCN)
|
||||
cfg.PeerTLSInfo.ClientCertAuth = len(c.Security.CAPath) != 0
|
||||
cfg.PeerTLSInfo.TrustedCAFile = c.Security.CAPath
|
||||
cfg.PeerTLSInfo.CertFile = c.Security.CertPath
|
||||
cfg.PeerTLSInfo.KeyFile = c.Security.KeyPath
|
||||
cfg.PeerTLSInfo.AllowedCN = allowedCN
|
||||
cfg.ZapLoggerBuilder = embed.NewZapCoreLoggerBuilder(c.logger, c.logger.Core(), c.logProps.Syncer)
|
||||
cfg.EnableGRPCGateway = c.EnableGRPCGateway
|
||||
cfg.EnableV2 = true
|
||||
cfg.Logger = "zap"
|
||||
var err error
|
||||
|
||||
cfg.LPUrls, err = ParseUrls(c.PeerUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg.APUrls, err = ParseUrls(c.AdvertisePeerUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg.LCUrls, err = ParseUrls(c.ClientUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg.ACUrls, err = ParseUrls(c.AdvertiseClientUrls)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
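For illustration only (this block is not part of the commit): a minimal sketch of how the ParseUrls helper above could be called from a hypothetical main package; the import path is assumed from the repository layout.

package main

import (
	"fmt"

	"github.com/czs007/suvlim/master/config" // assumed import path for the config package above
)

func main() {
	// Parse a comma-separated URL list, as GenEmbedEtcdConfig does for peer and client URLs.
	urls, err := config.ParseUrls("http://127.0.0.1:2380,http://127.0.0.1:2381")
	if err != nil {
		panic(err)
	}
	for _, u := range urls {
		fmt.Println(u.Scheme, u.Host)
	}
}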
77
master/config/persist_options.go
Normal file
@ -0,0 +1,77 @@
// Copyright 2017 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"sync/atomic"
	"time"
	//"github.com/czs007/suvlim/master/kv"
	"github.com/czs007/suvlim/master/meta"
)

// PersistOptions wraps all configurations that need to persist to storage and
// allows accessing them safely.
type PersistOptions struct {
	pdServerConfig atomic.Value
}

// NewPersistOptions creates a new PersistOptions instance.
func NewPersistOptions(cfg *Config) *PersistOptions {
	o := &PersistOptions{}
	o.pdServerConfig.Store(&cfg.PDServerCfg)
	return o
}

// GetPDServerConfig returns pd server configurations.
func (o *PersistOptions) GetPDServerConfig() *PDServerConfig {
	return o.pdServerConfig.Load().(*PDServerConfig)
}

// SetPDServerConfig sets the PD configuration.
func (o *PersistOptions) SetPDServerConfig(cfg *PDServerConfig) {
	o.pdServerConfig.Store(cfg)
}

// GetMaxResetTSGap gets the max gap to reset the tso.
func (o *PersistOptions) GetMaxResetTSGap() time.Duration {
	return o.GetPDServerConfig().MaxResetTSGap.Duration
}

// Persist saves the configuration to the storage.
func (o *PersistOptions) Persist(storage *meta.Storage) error {
	cfg := &Config{
		PDServerCfg: *o.GetPDServerConfig(),
	}
	err := storage.SaveConfig(cfg)
	return err
}

// Reload reloads the configuration from the storage.
func (o *PersistOptions) Reload(storage *meta.Storage) error {
	cfg := &Config{}
	// pass nil to initialize cfg to default values (all items undefined)
	cfg.Adjust(nil)

	isExist, err := storage.LoadConfig(cfg)
	if err != nil {
		return err
	}
	if isExist {
		o.pdServerConfig.Store(&cfg.PDServerCfg)
	}
	return nil
}
64
master/config/util.go
Normal file
@ -0,0 +1,64 @@
// Copyright 2019 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"net/url"
	"regexp"

	"github.com/czs007/suvlim/errors"
	"github.com/czs007/suvlim/pkg/metapb"
)

const (
	// A label key consists of alphanumeric characters, '-', '_', '.' or '/', and must start and end with an
	// alphanumeric character. It can also contain an extra '$' at the beginning.
	keyFormat = "^[$]?[A-Za-z0-9]([-A-Za-z0-9_./]*[A-Za-z0-9])?$"
	// A label value can be any combination of alphanumeric characters, '-', '_', '.' or '/'. It can also be empty to
	// mark the label as deleted.
	valueFormat = "^[-A-Za-z0-9_./]*$"
)

func validateFormat(s, format string) error {
	isValid, _ := regexp.MatchString(format, s)
	if !isValid {
		return errors.Errorf("%s does not match format %q", s, format)
	}
	return nil
}

// ValidateLabels checks the legality of the labels.
func ValidateLabels(labels []*metapb.StoreLabel) error {
	for _, label := range labels {
		if err := validateFormat(label.Key, keyFormat); err != nil {
			return err
		}
		if err := validateFormat(label.Value, valueFormat); err != nil {
			return err
		}
	}
	return nil
}

// ValidateURLWithScheme checks the format of the URL.
func ValidateURLWithScheme(rawURL string) error {
	u, err := url.ParseRequestURI(rawURL)
	if err != nil {
		return err
	}
	if u.Scheme == "" || u.Host == "" {
		return errors.Errorf("%s has no scheme", rawURL)
	}
	return nil
}
78
master/config/util_test.go
Normal file
@ -0,0 +1,78 @@
// Copyright 2018 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"github.com/czs007/suvlim/pkg/metapb"
	. "github.com/pingcap/check"
)

var _ = Suite(&testUtilSuite{})

type testUtilSuite struct{}

func (s *testUtilSuite) TestValidateLabels(c *C) {
	tests := []struct {
		label  string
		hasErr bool
	}{
		{"z1", false},
		{"z-1", false},
		{"h1;", true},
		{"z_1", false},
		{"z_1&", true},
		{"cn", false},
		{"Zo^ne", true},
		{"z_", true},
		{"hos&t-15", true},
		{"_test1", true},
		{"-test1", true},
		{"192.168.199.1", false},
		{"www.pingcap.com", false},
		{"h_127.0.0.1", false},
		{"a", false},
		{"a/b", false},
		{"ab/", true},
		{"/ab", true},
		{"$abc", false},
		{"$", true},
		{"a$b", true},
		{"$$", true},
	}
	for _, t := range tests {
		c.Assert(ValidateLabels([]*metapb.StoreLabel{{Key: t.label}}) != nil, Equals, t.hasErr)
	}
}

func (s *testUtilSuite) TestValidateURLWithScheme(c *C) {
	tests := []struct {
		addr   string
		hasErr bool
	}{
		{"", true},
		{"foo", true},
		{"/foo", true},
		{"http", true},
		{"http://", true},
		{"http://foo", false},
		{"https://foo", false},
		{"http://127.0.0.1", false},
		{"http://127.0.0.1/", false},
		{"https://foo.com/bar", false},
		{"https://foo.com/bar/", false},
	}
	for _, t := range tests {
		c.Assert(ValidateURLWithScheme(t.addr) != nil, Equals, t.hasErr)
	}
}
119
master/election/leadership.go
Normal file
@ -0,0 +1,119 @@
// Copyright 2020 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package election

import (
	"context"
	"sync/atomic"
	"github.com/czs007/suvlim/errors"
	"github.com/czs007/suvlim/pkg/pdpb"
	"github.com/czs007/suvlim/util/etcdutil"
	"github.com/czs007/suvlim/master/kv"
	"go.etcd.io/etcd/clientv3"
)

// GetLeader gets the corresponding leader from etcd by given leaderPath (as the key).
func GetLeader(c *clientv3.Client, leaderPath string) (*pdpb.Member, int64, error) {
	leader := &pdpb.Member{}
	ok, rev, err := etcdutil.GetProtoMsgWithModRev(c, leaderPath, leader)
	if err != nil {
		return nil, 0, err
	}
	if !ok {
		return nil, 0, nil
	}

	return leader, rev, nil
}

// Leadership is used to manage the leadership campaigning.
type Leadership struct {
	// purpose is used to show what this election is for
	purpose string
	// The lease which is used to get this leadership
	lease  atomic.Value // stored as *lease
	client *clientv3.Client
	// leaderKey and leaderValue are a key-value pair in etcd
	leaderKey   string
	leaderValue string
}

// NewLeadership creates a new Leadership.
func NewLeadership(client *clientv3.Client, leaderKey, purpose string) *Leadership {
	leadership := &Leadership{
		purpose:   purpose,
		client:    client,
		leaderKey: leaderKey,
	}
	return leadership
}

// getLease gets the lease of the leadership; only if the leadership is valid,
// i.e. the owner is a true leader, is the lease non-nil.
func (ls *Leadership) getLease() *lease {
	l := ls.lease.Load()
	if l == nil {
		return nil
	}
	return l.(*lease)
}

func (ls *Leadership) setLease(lease *lease) {
	ls.lease.Store(lease)
}

// GetClient is used to get the etcd client.
func (ls *Leadership) GetClient() *clientv3.Client {
	return ls.client
}

// Keep keeps the leadership available by continuously updating the lease's expire time.
func (ls *Leadership) Keep(ctx context.Context) {
	ls.getLease().KeepAlive(ctx)
}

// Check returns whether the leadership is still available.
func (ls *Leadership) Check() bool {
	return ls != nil && ls.getLease() != nil && !ls.getLease().IsExpired()
}

// LeaderTxn returns txn() with a leader comparison to guarantee that
// the transaction can be executed only if the server is leader.
func (ls *Leadership) LeaderTxn(cs ...clientv3.Cmp) clientv3.Txn {
	txn := kv.NewSlowLogTxn(ls.client)
	return txn.If(append(cs, ls.leaderCmp())...)
}

func (ls *Leadership) leaderCmp() clientv3.Cmp {
	return clientv3.Compare(clientv3.Value(ls.leaderKey), "=", ls.leaderValue)
}

// DeleteLeader deletes the corresponding leader from etcd by given leaderPath (as the key).
func (ls *Leadership) DeleteLeader() error {
	// delete leader itself and let others start a new election again.
	resp, err := ls.LeaderTxn().Then(clientv3.OpDelete(ls.leaderKey)).Commit()
	if err != nil {
		return errors.WithStack(err)
	}
	if !resp.Succeeded {
		return errors.New("resign leader failed, we are not leader already")
	}

	return nil
}

// Reset does some deferred jobs such as closing and resetting the lease.
func (ls *Leadership) Reset() {
	ls.getLease().Close()
}
149
master/election/lease.go
Normal file
@ -0,0 +1,149 @@
// Copyright 2019 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package election

import (
	"context"
	"sync/atomic"
	"time"

	"github.com/czs007/suvlim/errors"
	"github.com/pingcap/log"
	"github.com/tikv/pd/pkg/etcdutil"
	"go.etcd.io/etcd/clientv3"
	"go.uber.org/zap"
)

const (
	revokeLeaseTimeout = time.Second
	requestTimeout     = etcdutil.DefaultRequestTimeout
	slowRequestTime    = etcdutil.DefaultSlowRequestTime
)

// lease is used as the low-level mechanism for campaigning and renewing elected leadership.
// The way to gain and maintain leadership is to update and keep the lease alive continuously.
type lease struct {
	// purpose is used to show what this election is for
	Purpose string
	// etcd client and lease
	client *clientv3.Client
	lease  clientv3.Lease
	ID     clientv3.LeaseID
	// leaseTimeout and expireTime are used to control the lease's lifetime
	leaseTimeout time.Duration
	expireTime   atomic.Value
}

// Grant uses `lease.Grant` to initialize the lease and expireTime.
func (l *lease) Grant(leaseTimeout int64) error {
	start := time.Now()
	ctx, cancel := context.WithTimeout(l.client.Ctx(), requestTimeout)
	leaseResp, err := l.lease.Grant(ctx, leaseTimeout)
	cancel()
	if err != nil {
		return errors.WithStack(err)
	}
	if cost := time.Since(start); cost > slowRequestTime {
		log.Warn("lease grants too slow", zap.Duration("cost", cost), zap.String("purpose", l.Purpose))
	}
	log.Info("lease granted", zap.Int64("lease-id", int64(leaseResp.ID)), zap.Int64("lease-timeout", leaseTimeout), zap.String("purpose", l.Purpose))
	l.ID = leaseResp.ID
	l.leaseTimeout = time.Duration(leaseTimeout) * time.Second
	l.expireTime.Store(start.Add(time.Duration(leaseResp.TTL) * time.Second))
	return nil
}

// Close releases the lease.
func (l *lease) Close() error {
	// Reset expire time.
	l.expireTime.Store(time.Time{})
	// Try to revoke lease to make subsequent elections faster.
	ctx, cancel := context.WithTimeout(l.client.Ctx(), revokeLeaseTimeout)
	defer cancel()
	l.lease.Revoke(ctx, l.ID)
	return l.lease.Close()
}

// IsExpired checks if the lease is expired. If it returns true,
// the current leader should step down and try to re-elect again.
func (l *lease) IsExpired() bool {
	if l.expireTime.Load() == nil {
		return false
	}
	return time.Now().After(l.expireTime.Load().(time.Time))
}

// KeepAlive auto-renews the lease and updates expireTime.
func (l *lease) KeepAlive(ctx context.Context) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	timeCh := l.keepAliveWorker(ctx, l.leaseTimeout/3)

	var maxExpire time.Time
	for {
		select {
		case t := <-timeCh:
			if t.After(maxExpire) {
				maxExpire = t
				l.expireTime.Store(t)
			}
		case <-time.After(l.leaseTimeout):
			log.Info("lease timeout", zap.Time("expire", l.expireTime.Load().(time.Time)), zap.String("purpose", l.Purpose))
			return
		case <-ctx.Done():
			return
		}
	}
}

// keepAliveWorker periodically calls `lease.KeepAliveOnce` and posts the latest received expire time into the channel.
func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-chan time.Time {
	ch := make(chan time.Time)

	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()

		log.Info("start lease keep alive worker", zap.Duration("interval", interval), zap.String("purpose", l.Purpose))
		defer log.Info("stop lease keep alive worker", zap.String("purpose", l.Purpose))

		for {
			go func() {
				start := time.Now()
				ctx1, cancel := context.WithTimeout(ctx, l.leaseTimeout)
				defer cancel()
				res, err := l.lease.KeepAliveOnce(ctx1, l.ID)
				if err != nil {
					log.Warn("lease keep alive failed", zap.Error(err), zap.String("purpose", l.Purpose))
					return
				}
				if res.TTL > 0 {
					expire := start.Add(time.Duration(res.TTL) * time.Second)
					select {
					case ch <- expire:
					case <-ctx1.Done():
					}
				}
			}()

			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
			}
		}
	}()

	return ch
}
196
master/grpc_service.go
Normal file
@ -0,0 +1,196 @@
// Copyright 2017 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"context"
	"io"
	//"strconv"
	"sync/atomic"
	"time"

	"github.com/czs007/suvlim/pkg/pdpb"
	"github.com/czs007/suvlim/errors"
	"github.com/pingcap/log"
	//"github.com/czs007/suvlim/util/tsoutil"
	//"github.com/tikv/pd/server/cluster"
	//"github.com/tikv/pd/server/core"
	//"github.com/tikv/pd/server/versioninfo"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const slowThreshold = 5 * time.Millisecond

// gRPC errors
var (
	// ErrNotLeader is returned when the current server is not the leader and it is not possible to process the request.
	// TODO: work as proxy.
	ErrNotLeader  = status.Errorf(codes.Unavailable, "not leader")
	ErrNotStarted = status.Errorf(codes.Unavailable, "server not started")
)

// Tso implements gRPC PDServer.
func (s *Server) Tso(stream pdpb.PD_TsoServer) error {
	for {
		request, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return errors.WithStack(err)
		}
		start := time.Now()
		// TSO uses leader lease to determine validity. No need to check leader here.
		if s.IsClosed() {
			return status.Errorf(codes.Unknown, "server not started")
		}
		if request.GetHeader().GetClusterId() != s.clusterID {
			return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.clusterID, request.GetHeader().GetClusterId())
		}
		count := request.GetCount()
		ts, err := s.tsoAllocator.GenerateTSO(count)
		if err != nil {
			return status.Errorf(codes.Unknown, err.Error())
		}

		elapsed := time.Since(start)
		if elapsed > slowThreshold {
			log.Warn("get timestamp too slow", zap.Duration("cost", elapsed))
		}
		response := &pdpb.TsoResponse{
			Header:    s.header(),
			Timestamp: &ts,
			Count:     count,
		}
		if err := stream.Send(response); err != nil {
			return errors.WithStack(err)
		}
	}
}

// AllocID implements gRPC PDServer.
func (s *Server) AllocID(ctx context.Context, request *pdpb.AllocIDRequest) (*pdpb.AllocIDResponse, error) {
	if err := s.validateRequest(request.GetHeader()); err != nil {
		return nil, err
	}

	// We can use an allocator for all types of ID allocation.
	id, err := s.idAllocator.Alloc()
	if err != nil {
		return nil, status.Errorf(codes.Unknown, err.Error())
	}

	return &pdpb.AllocIDResponse{
		Header: s.header(),
		Id:     id,
	}, nil
}

const heartbeatSendTimeout = 5 * time.Second

var errSendHeartbeatTimeout = errors.New("send region heartbeat timeout")

// heartbeatServer wraps PD_HeartbeatServer to ensure that when any error
// occurs on Send() or Recv(), both endpoints will be closed.
type heartbeatServer struct {
	stream pdpb.PD_HeartbeatServer
	closed int32
}

func (s *heartbeatServer) Send(m *pdpb.HeartbeatResponse) error {
	if atomic.LoadInt32(&s.closed) == 1 {
		return io.EOF
	}
	done := make(chan error, 1)
	go func() { done <- s.stream.Send(m) }()
	select {
	case err := <-done:
		if err != nil {
			atomic.StoreInt32(&s.closed, 1)
		}
		return errors.WithStack(err)
	case <-time.After(heartbeatSendTimeout):
		atomic.StoreInt32(&s.closed, 1)
		return errors.WithStack(errSendHeartbeatTimeout)
	}
}

func (s *heartbeatServer) Recv() (*pdpb.HeartbeatRequest, error) {
	if atomic.LoadInt32(&s.closed) == 1 {
		return nil, io.EOF
	}
	req, err := s.stream.Recv()
	if err != nil {
		atomic.StoreInt32(&s.closed, 1)
		return nil, errors.WithStack(err)
	}
	return req, nil
}

// Heartbeat implements gRPC PDServer.
func (s *Server) Heartbeat(stream pdpb.PD_HeartbeatServer) error {
	server := &heartbeatServer{stream: stream}

	for {
		request, err := server.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return errors.WithStack(err)
		}

		if err = s.validateRequest(request.GetHeader()); err != nil {
			return err
		}
		//msg:= "OK"
		//s.hbStreams.SendMsg(msg)
	}
}

// validateRequest checks if the Server is leader and the clusterID is matched.
// TODO: Call it in a gRPC interceptor.
func (s *Server) validateRequest(header *pdpb.RequestHeader) error {
	if s.IsClosed() {
		return errors.WithStack(ErrNotLeader)
	}
	if header.GetClusterId() != s.clusterID {
		return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.clusterID, header.GetClusterId())
	}
	return nil
}

func (s *Server) header() *pdpb.ResponseHeader {
	return &pdpb.ResponseHeader{ClusterId: s.clusterID}
}

func (s *Server) errorHeader(err *pdpb.Error) *pdpb.ResponseHeader {
	return &pdpb.ResponseHeader{
		ClusterId: s.clusterID,
		Error:     err,
	}
}

func (s *Server) notBootstrappedHeader() *pdpb.ResponseHeader {
	return s.errorHeader(&pdpb.Error{
		Type:    pdpb.ErrorType_NOT_BOOTSTRAPPED,
		Message: "cluster is not bootstrapped",
	})
}
135
master/heartbeat_streams.go
Normal file
@ -0,0 +1,135 @@
// Copyright 2017 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"context"
	"sync"
	"time"

	"github.com/czs007/suvlim/util/logutil"
	"github.com/czs007/suvlim/pkg/pdpb"
)

// HeartbeatStream is an interface.
type HeartbeatStream interface {
	Send(*pdpb.HeartbeatResponse) error
}

// HeartbeatStreams is an interface for asynchronous region heartbeats.
type HeartbeatStreams interface {
	SendMsg(msg *pdpb.HeartbeatResponse)
	BindStream(peerID uint64, stream HeartbeatStream)
}

const (
	heartbeatStreamKeepAliveInterval = time.Minute
	heartbeatChanCapacity            = 1024
)

type streamUpdate struct {
	peerID uint64
	stream HeartbeatStream
}

type heartbeatStreams struct {
	wg             sync.WaitGroup
	hbStreamCtx    context.Context
	hbStreamCancel context.CancelFunc
	clusterID      uint64
	streams        map[uint64]HeartbeatStream
	msgCh          chan *pdpb.HeartbeatResponse
	streamCh       chan streamUpdate
}

func newHeartbeatStreams(ctx context.Context, clusterID uint64) *heartbeatStreams {
	hbStreamCtx, hbStreamCancel := context.WithCancel(ctx)
	hs := &heartbeatStreams{
		hbStreamCtx:    hbStreamCtx,
		hbStreamCancel: hbStreamCancel,
		clusterID:      clusterID,
		streams:        make(map[uint64]HeartbeatStream),
		msgCh:          make(chan *pdpb.HeartbeatResponse, heartbeatChanCapacity),
		streamCh:       make(chan streamUpdate, 1),
	}
	hs.wg.Add(1)
	go hs.run()
	return hs
}

func (s *heartbeatStreams) run() {
	defer logutil.LogPanic()

	defer s.wg.Done()

	keepAliveTicker := time.NewTicker(heartbeatStreamKeepAliveInterval)
	defer keepAliveTicker.Stop()

	//keepAlive := &pdpb.HeartbeatResponse{Header: &pdpb.ResponseHeader{ClusterId: s.clusterID}}

	for {
		select {
		case update := <-s.streamCh:
			s.streams[update.peerID] = update.stream
		case msg := <-s.msgCh:
			println("msgCh", msg)
		case <-keepAliveTicker.C:
			println("keepAlive")
		case <-s.hbStreamCtx.Done():
			return
		}
	}
}

func (s *heartbeatStreams) Close() {
	s.hbStreamCancel()
	s.wg.Wait()
}

func (s *heartbeatStreams) BindStream(peerID uint64, stream HeartbeatStream) {
	update := streamUpdate{
		peerID: peerID,
		stream: stream,
	}
	select {
	case s.streamCh <- update:
	case <-s.hbStreamCtx.Done():
	}
}

func (s *heartbeatStreams) SendMsg(msg *pdpb.HeartbeatResponse) {
	msg.Header = &pdpb.ResponseHeader{ClusterId: s.clusterID}
	select {
	case s.msgCh <- msg:
	case <-s.hbStreamCtx.Done():
	}
}

func (s *heartbeatStreams) sendErr(errType pdpb.ErrorType, errMsg string) {
	msg := &pdpb.HeartbeatResponse{
		Header: &pdpb.ResponseHeader{
			ClusterId: s.clusterID,
			Error: &pdpb.Error{
				Type:    errType,
				Message: errMsg,
			},
		},
	}

	select {
	case s.msgCh <- msg:
	case <-s.hbStreamCtx.Done():
	}
}
116
master/id/id.go
Normal file
@ -0,0 +1,116 @@
// Copyright 2016 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package id

import (
	"path"
	"sync"

	"github.com/czs007/suvlim/errors"
	"github.com/czs007/suvlim/master/kv"
	"github.com/czs007/suvlim/util/etcdutil"
	"github.com/czs007/suvlim/util/typeutil"
	"github.com/pingcap/log"
	"go.etcd.io/etcd/clientv3"
	"go.uber.org/zap"
)

// Allocator is the allocator used to generate unique IDs.
type Allocator interface {
	Alloc() (uint64, error)
}

const allocStep = uint64(1000)

// AllocatorImpl is used to allocate IDs.
type AllocatorImpl struct {
	mu   sync.Mutex
	base uint64
	end  uint64

	client   *clientv3.Client
	rootPath string
	member   string
}

// NewAllocatorImpl creates a new IDAllocator.
func NewAllocatorImpl(client *clientv3.Client, rootPath string, member string) *AllocatorImpl {
	return &AllocatorImpl{client: client, rootPath: rootPath, member: member}
}

// Alloc returns a new id.
func (alloc *AllocatorImpl) Alloc() (uint64, error) {
	alloc.mu.Lock()
	defer alloc.mu.Unlock()

	if alloc.base == alloc.end {
		end, err := alloc.generate()
		if err != nil {
			return 0, err
		}

		alloc.end = end
		alloc.base = alloc.end - allocStep
	}

	alloc.base++

	return alloc.base, nil
}

func (alloc *AllocatorImpl) generate() (uint64, error) {
	key := alloc.getAllocIDPath()
	value, err := etcdutil.GetValue(alloc.client, key)
	if err != nil {
		return 0, err
	}

	var (
		cmp clientv3.Cmp
		end uint64
	)

	if value == nil {
		// create the key
		cmp = clientv3.Compare(clientv3.CreateRevision(key), "=", 0)
	} else {
		// update the key
		end, err = typeutil.BytesToUint64(value)
		if err != nil {
			return 0, err
		}

		cmp = clientv3.Compare(clientv3.Value(key), "=", string(value))
	}

	end += allocStep
	value = typeutil.Uint64ToBytes(end)
	txn := kv.NewSlowLogTxn(alloc.client)
	leaderPath := path.Join(alloc.rootPath, "leader")
	t := txn.If(append([]clientv3.Cmp{cmp}, clientv3.Compare(clientv3.Value(leaderPath), "=", alloc.member))...)
	resp, err := t.Then(clientv3.OpPut(key, string(value))).Commit()
	if err != nil {
		return 0, err
	}
	if !resp.Succeeded {
		return 0, errors.New("generate id failed, we may not leader")
	}

	log.Info("idAllocator allocates a new id", zap.Uint64("alloc-id", end))
	return end, nil
}

func (alloc *AllocatorImpl) getAllocIDPath() string {
	return path.Join(alloc.rootPath, "alloc_id")
}
151
master/kv/etcd_kv.go
Normal file
@ -0,0 +1,151 @@
// Copyright 2016 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package kv

import (
	"context"
	"path"

	//"strings"
	"time"

	"github.com/czs007/suvlim/util/etcdutil"
	"github.com/pingcap/errors"
	"github.com/pingcap/log"
	"go.etcd.io/etcd/clientv3"
	"go.uber.org/zap"
)

const (
	requestTimeout  = 10 * time.Second
	slowRequestTime = 1 * time.Second
)

var (
	errTxnFailed = errors.New("failed to commit transaction")
)

type etcdKVBase struct {
	client   *clientv3.Client
	rootPath string
}

// NewEtcdKVBase creates a new etcd kv.
func NewEtcdKVBase(client *clientv3.Client, rootPath string) *etcdKVBase {
	return &etcdKVBase{
		client:   client,
		rootPath: rootPath,
	}
}

func (kv *etcdKVBase) Load(key string) (string, error) {
	key = path.Join(kv.rootPath, key)

	resp, err := etcdutil.EtcdKVGet(kv.client, key)
	if err != nil {
		return "", err
	}
	if n := len(resp.Kvs); n == 0 {
		return "", nil
	} else if n > 1 {
		return "", errors.Errorf("load more than one kvs: key %v kvs %v", key, n)
	}
	return string(resp.Kvs[0].Value), nil
}

func (kv *etcdKVBase) Save(key, value string) error {
	key = path.Join(kv.rootPath, key)

	txn := NewSlowLogTxn(kv.client)
	resp, err := txn.Then(clientv3.OpPut(key, value)).Commit()
	if err != nil {
		log.Error("save to etcd meet error", zap.String("key", key), zap.String("value", value))
		return errors.WithStack(err)
	}
	if !resp.Succeeded {
		return errors.WithStack(errTxnFailed)
	}
	return nil
}

func (kv *etcdKVBase) Remove(key string) error {
	key = path.Join(kv.rootPath, key)

	txn := NewSlowLogTxn(kv.client)
	resp, err := txn.Then(clientv3.OpDelete(key)).Commit()
	if err != nil {
		log.Error("remove from etcd meet error", zap.String("key", key))
		return errors.WithStack(err)
	}
	if !resp.Succeeded {
		return errors.WithStack(errTxnFailed)
	}
	return nil
}

// SlowLogTxn wraps an etcd transaction and logs slow ones.
type SlowLogTxn struct {
	clientv3.Txn
	cancel context.CancelFunc
}

// NewSlowLogTxn creates a SlowLogTxn.
func NewSlowLogTxn(client *clientv3.Client) clientv3.Txn {
	ctx, cancel := context.WithTimeout(client.Ctx(), requestTimeout)
	return &SlowLogTxn{
		Txn:    client.Txn(ctx),
		cancel: cancel,
	}
}

// If takes a list of comparisons. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Otherwise the operations
// passed into Else() will be executed.
func (t *SlowLogTxn) If(cs ...clientv3.Cmp) clientv3.Txn {
	return &SlowLogTxn{
		Txn:    t.Txn.If(cs...),
		cancel: t.cancel,
	}
}

// Then takes a list of operations. The Ops list will be executed if the
// comparisons passed in If() succeed.
func (t *SlowLogTxn) Then(ops ...clientv3.Op) clientv3.Txn {
	return &SlowLogTxn{
		Txn:    t.Txn.Then(ops...),
		cancel: t.cancel,
	}
}

// Commit implements the Txn Commit interface.
func (t *SlowLogTxn) Commit() (*clientv3.TxnResponse, error) {
	start := time.Now()
	resp, err := t.Txn.Commit()
	t.cancel()

	cost := time.Since(start)
	if cost > slowRequestTime {
		log.Warn("txn runs too slow",
			zap.Error(err),
			zap.Reflect("response", resp),
			zap.Duration("cost", cost))
	}
	//label := "success"
	//if err != nil {
	//	label = "failed"
	//}
	//txnCounter.WithLabelValues(label).Inc()
	//txnDuration.WithLabelValues(label).Observe(cost.Seconds())
	return resp, errors.WithStack(err)
}
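For illustration only (not part of the commit): a sketch of how NewSlowLogTxn wraps an ordinary etcd transaction; the endpoint and import path are assumptions, and error handling is abbreviated.

package main

import (
	"fmt"

	"github.com/czs007/suvlim/master/kv" // assumed import path for the kv package above
	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Connect to a local etcd (assumed endpoint).
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// NewSlowLogTxn returns a clientv3.Txn whose Commit logs transactions slower than slowRequestTime.
	resp, err := kv.NewSlowLogTxn(cli).
		If(clientv3.Compare(clientv3.CreateRevision("demo-key"), "=", 0)).
		Then(clientv3.OpPut("demo-key", "demo-value")).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("txn succeeded:", resp.Succeeded)
}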
21
master/kv/kv.go
Normal file
@ -0,0 +1,21 @@
// Copyright 2017 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package kv

// Base is an abstract interface for load/save pd cluster data.
type Base interface {
	Load(key string) (string, error)
	Save(key, value string) error
	Remove(key string) error
}
81
master/kv/mem_kv.go
Normal file
@ -0,0 +1,81 @@
// Copyright 2017 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package kv

import (
	"sync"

	"github.com/google/btree"
)

type memoryKV struct {
	sync.RWMutex
	tree *btree.BTree
}

// NewMemoryKV returns an in-memory kvBase for testing.
func NewMemoryKV() Base {
	return &memoryKV{
		tree: btree.New(2),
	}
}

type memoryKVItem struct {
	key, value string
}

func (s memoryKVItem) Less(than btree.Item) bool {
	return s.key < than.(memoryKVItem).key
}

func (kv *memoryKV) Load(key string) (string, error) {
	kv.RLock()
	defer kv.RUnlock()
	item := kv.tree.Get(memoryKVItem{key, ""})
	if item == nil {
		return "", nil
	}
	return item.(memoryKVItem).value, nil
}

func (kv *memoryKV) LoadRange(key, endKey string, limit int) ([]string, []string, error) {
	kv.RLock()
	defer kv.RUnlock()
	keys := make([]string, 0, limit)
	values := make([]string, 0, limit)
	kv.tree.AscendRange(memoryKVItem{key, ""}, memoryKVItem{endKey, ""}, func(item btree.Item) bool {
		keys = append(keys, item.(memoryKVItem).key)
		values = append(values, item.(memoryKVItem).value)
		if limit > 0 {
			return len(keys) < limit
		}
		return true
	})
	return keys, values, nil
}

func (kv *memoryKV) Save(key, value string) error {
	kv.Lock()
	defer kv.Unlock()
	kv.tree.ReplaceOrInsert(memoryKVItem{key, value})
	return nil
}

func (kv *memoryKV) Remove(key string) error {
	kv.Lock()
	defer kv.Unlock()

	kv.tree.Delete(memoryKVItem{key, ""})
	return nil
}
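For illustration only (not part of the commit): the in-memory kv above satisfies the kv.Base interface, which makes it handy in tests; a minimal sketch, with the import path assumed.

package main

import (
	"fmt"

	"github.com/czs007/suvlim/master/kv" // assumed import path
)

func main() {
	base := kv.NewMemoryKV()

	// Save, load and remove a key through the kv.Base interface.
	if err := base.Save("meta/collection0", "schema-v1"); err != nil {
		panic(err)
	}
	value, _ := base.Load("meta/collection0")
	fmt.Println(value) // schema-v1
	_ = base.Remove("meta/collection0")
}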
102
master/member/member.go
Normal file
@ -0,0 +1,102 @@
// Copyright 2016 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package member

import (
	"strings"
	"time"

	"github.com/czs007/suvlim/master/election"

	"github.com/czs007/suvlim/pkg/pdpb"

	"github.com/czs007/suvlim/master/config"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/embed"
)

const (
	// The timeout to wait for transferring the etcd leader to complete.
	moveLeaderTimeout = 5 * time.Second
)

// Member is used for the election related logic.
type Member struct {
	// etcd and cluster information.
	leadership *election.Leadership
	etcd       *embed.Etcd
	client     *clientv3.Client
	id         uint64       // etcd server id.
	member     *pdpb.Member // current PD's info.
	// memberValue is the serialized string of `member`. It will be saved in
	// the etcd leader key when the PD node is successfully elected as the PD leader
	// of the cluster. Every write will use it to check PD leadership.
	memberValue string
}

// NewMember creates a new Member.
func NewMember(etcd *embed.Etcd, client *clientv3.Client, id uint64) *Member {
	return &Member{
		etcd:   etcd,
		client: client,
		id:     id,
	}
}

// ID returns the unique etcd ID for this server in the etcd cluster.
func (m *Member) ID() uint64 {
	return m.id
}

// MemberValue returns the member value.
func (m *Member) MemberValue() string {
	return m.memberValue
}

// Member returns the member.
func (m *Member) Member() *pdpb.Member {
	return m.member
}

// Etcd returns etcd related information.
func (m *Member) Etcd() *embed.Etcd {
	return m.etcd
}

// GetEtcdLeader returns the etcd leader ID.
func (m *Member) GetEtcdLeader() uint64 {
	return m.etcd.Server.Lead()
}

// GetLeadership returns the leadership of the PD member.
func (m *Member) GetLeadership() *election.Leadership {
	return m.leadership
}

// MemberInfo initializes the member info.
func (m *Member) MemberInfo(cfg *config.Config, name string) {
	leader := &pdpb.Member{
		Name:       name,
		MemberId:   m.ID(),
		ClientUrls: strings.Split(cfg.AdvertiseClientUrls, ","),
		PeerUrls:   strings.Split(cfg.AdvertisePeerUrls, ","),
	}
	m.member = leader
}

// Close gracefully shuts down all servers/listeners.
func (m *Member) Close() {
	m.Etcd().Close()
}
112
master/meta/storage.go
Normal file
@ -0,0 +1,112 @@
// Copyright 2017 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package meta

import (
	"encoding/json"
	"path"
	"sync"
	"github.com/gogo/protobuf/proto"
	"github.com/czs007/suvlim/errors"
	"github.com/czs007/suvlim/pkg/metapb"
	"github.com/czs007/suvlim/master/kv"
)

const (
	clusterPath = "raft"
	configPath  = "config"
)

// Storage wraps all kv operations, keep it stateless.
type Storage struct {
	kv.Base
	mu sync.Mutex
}

// NewStorage creates a Storage instance with Base.
func NewStorage(base kv.Base) *Storage {
	return &Storage{
		Base: base,
	}
}

// LoadMeta loads cluster meta from storage.
func (s *Storage) LoadMeta(meta *metapb.Cluster) (bool, error) {
	return loadProto(s.Base, clusterPath, meta)
}

// SaveMeta saves cluster meta to storage.
func (s *Storage) SaveMeta(meta *metapb.Cluster) error {
	return saveProto(s.Base, clusterPath, meta)
}

// SaveConfig marshals cfg to JSON and stores it under configPath.
func (s *Storage) SaveConfig(cfg interface{}) error {
	value, err := json.Marshal(cfg)
	if err != nil {
		return errors.WithStack(err)
	}
	return s.Save(configPath, string(value))
}

// LoadConfig loads config from configPath then unmarshals it to cfg.
func (s *Storage) LoadConfig(cfg interface{}) (bool, error) {
	value, err := s.Load(configPath)
	if err != nil {
		return false, err
	}
	if value == "" {
		return false, nil
	}
	err = json.Unmarshal([]byte(value), cfg)
	if err != nil {
		return false, errors.WithStack(err)
	}
	return true, nil
}

// SaveJSON saves json-format data to storage.
func (s *Storage) SaveJSON(prefix, key string, data interface{}) error {
	value, err := json.Marshal(data)
	if err != nil {
		return err
	}
	return s.Save(path.Join(prefix, key), string(value))
}

// Close closes the storage.
func (s *Storage) Close() error {
	return nil
}

func loadProto(s kv.Base, key string, msg proto.Message) (bool, error) {
	value, err := s.Load(key)
	if err != nil {
		return false, err
	}
	if value == "" {
		return false, nil
	}
	err = proto.Unmarshal([]byte(value), msg)
	return true, errors.WithStack(err)
}

func saveProto(s kv.Base, key string, msg proto.Message) error {
	value, err := proto.Marshal(msg)
	if err != nil {
		return errors.WithStack(err)
	}
	return s.Save(key, string(value))
}
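For illustration only (not part of the commit): Storage composes any kv.Base, so it can be exercised against the in-memory kv from this commit; import paths are assumed and demoConfig is a hypothetical payload.

package main

import (
	"fmt"

	"github.com/czs007/suvlim/master/kv"   // assumed import path
	"github.com/czs007/suvlim/master/meta" // assumed import path
)

// demoConfig is a hypothetical config payload; SaveConfig accepts any JSON-marshallable value.
type demoConfig struct {
	Name string `json:"name"`
}

func main() {
	storage := meta.NewStorage(kv.NewMemoryKV())

	// SaveConfig marshals the value to JSON and stores it under the "config" key.
	if err := storage.SaveConfig(&demoConfig{Name: "pd-0"}); err != nil {
		panic(err)
	}

	loaded := &demoConfig{}
	found, err := storage.LoadConfig(loaded)
	if err != nil {
		panic(err)
	}
	fmt.Println(found, loaded.Name) // true pd-0
}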
473
master/server.go
Normal file
@ -0,0 +1,473 @@
// Copyright 2016 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package server

import (
	"context"

	"github.com/czs007/suvlim/master/kv"

	//"fmt"
	"math/rand"
	"path"

	"github.com/czs007/suvlim/master/member"

	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/czs007/suvlim/errors"

	//"github.com/czs007/suvlim/pkg/"
	"github.com/czs007/suvlim/pkg/pdpb"
	"github.com/czs007/suvlim/util/etcdutil"
	"github.com/pingcap/log"

	"github.com/czs007/suvlim/master/config"
	"github.com/czs007/suvlim/master/id"
	"github.com/czs007/suvlim/master/meta"
	"github.com/czs007/suvlim/util/typeutil"
	"github.com/czs007/suvlim/util/logutil"

	//"github.com/czs007/suvlim/master/kv"
	"github.com/czs007/suvlim/master/tso"
	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/embed"
	//"go.etcd.io/etcd/pkg/types"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

const (
	etcdTimeout     = time.Second * 3
	pdRootPath      = "/pd"
	pdAPIPrefix     = "/pd/"
	pdClusterIDPath = "/pd/cluster_id"
)

var (
	// EnableZap enables the zap logger in embed etcd.
	EnableZap = true
	// EtcdStartTimeout is the timeout for starting up etcd.
	EtcdStartTimeout = time.Minute * 5
)

// Server is the pd server.
type Server struct {
	// Server state.
	isServing int64

	// Server start timestamp
	startTimestamp int64

	// Configs and initial fields.
	cfg            *config.Config
	etcdCfg        *embed.Config
	persistOptions *config.PersistOptions

	// for PD leader election.
	member *member.Member

	// Server services.
	// for id allocator, we can use one allocator for
	// store, region and peer, because we just need
	// a unique ID.

	// handler *Handler

	ctx              context.Context
	serverLoopCtx    context.Context
	serverLoopCancel func()
	serverLoopWg     sync.WaitGroup

	// etcd client
	client    *clientv3.Client
	clusterID uint64 // pd cluster id.
	rootPath  string

	// Server services.
	// for id allocator, we can use one allocator for
	// store, region and peer, because we just need
	// a unique ID.
	idAllocator *id.AllocatorImpl
	storage     *meta.Storage

	// for tso.
	tsoAllocator tso.Allocator
	//

	hbStreams *heartbeatStreams

	// Zap logger
	lg       *zap.Logger
	logProps *log.ZapProperties

	// Add callback functions at different stages
	startCallbacks []func()
	closeCallbacks []func()
}

// CreateServer creates the UNINITIALIZED pd server with given configuration.
func CreateServer(ctx context.Context, cfg *config.Config) (*Server, error) {
	log.Info("PD Config", zap.Reflect("config", cfg))
	rand.Seed(time.Now().UnixNano())

	s := &Server{
		cfg:            cfg,
		persistOptions: config.NewPersistOptions(cfg),
		member:         &member.Member{},
		ctx:            ctx,
		startTimestamp: time.Now().Unix(),
	}

	// s.handler = newHandler(s)

	// Adjust etcd config.
	etcdCfg, err := s.cfg.GenEmbedEtcdConfig()
	if err != nil {
		return nil, err
	}

	etcdCfg.ServiceRegister = func(gs *grpc.Server) {
		pdpb.RegisterPDServer(gs, s)
		//diagnosticspb.RegisterDiagnosticsServer(gs, s)
	}
	s.etcdCfg = etcdCfg

	s.lg = cfg.GetZapLogger()
	s.logProps = cfg.GetZapLogProperties()

	s.lg = cfg.GetZapLogger()
	s.logProps = cfg.GetZapLogProperties()
	return s, nil
}

func (s *Server) startEtcd(ctx context.Context) error {
	newCtx, cancel := context.WithTimeout(ctx, EtcdStartTimeout)
	defer cancel()

	etcd, err := embed.StartEtcd(s.etcdCfg)
	if err != nil {
		return errors.WithStack(err)
	}

	// Check cluster ID
	//urlMap, err := types.NewURLsMap(s.cfg.InitialCluster)
	//if err != nil {
	//	return errors.WithStack(err)
	//}
	tlsConfig, err := s.cfg.Security.ToTLSConfig()
	if err != nil {
		return err
	}

	//if err = etcdutil.CheckClusterID(etcd.Server.Cluster().ID(), urlMap, tlsConfig); err != nil {
	//	return err
	//}

	select {
	// Wait etcd until it is ready to use
	case <-etcd.Server.ReadyNotify():
	case <-newCtx.Done():
		return errors.Errorf("canceled when waiting embed etcd to be ready")
	}

	endpoints := []string{s.etcdCfg.ACUrls[0].String()}
	log.Info("create etcd v3 client", zap.Strings("endpoints", endpoints), zap.Reflect("cert", s.cfg.Security))

	client, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: etcdTimeout,
		TLS:         tlsConfig,
	})
	if err != nil {
		return errors.WithStack(err)
	}

	etcdServerID := uint64(etcd.Server.ID())

	// update advertise peer urls.
	etcdMembers, err := etcdutil.ListEtcdMembers(client)
	if err != nil {
		return err
	}
	for _, m := range etcdMembers.Members {
		if etcdServerID == m.ID {
			etcdPeerURLs := strings.Join(m.PeerURLs, ",")
			if s.cfg.AdvertisePeerUrls != etcdPeerURLs {
				log.Info("update advertise peer urls", zap.String("from", s.cfg.AdvertisePeerUrls), zap.String("to", etcdPeerURLs))
				s.cfg.AdvertisePeerUrls = etcdPeerURLs
			}
		}
	}
	s.client = client

	return nil
}

// AddStartCallback adds a callback in the startServer phase.
func (s *Server) AddStartCallback(callbacks ...func()) {
	s.startCallbacks = append(s.startCallbacks, callbacks...)
}

func (s *Server) startServer(ctx context.Context) error {
	var err error
	if err = s.initClusterID(); err != nil {
		return err
	}
	log.Info("init cluster id", zap.Uint64("cluster-id", s.clusterID))
	// It may lose accuracy if use float64 to store uint64. So we store the
	// cluster id in label.
	//metadataGauge.WithLabelValues(fmt.Sprintf("cluster%d", s.clusterID)).Set(0)

	s.rootPath = path.Join(pdRootPath, strconv.FormatUint(s.clusterID, 10))

	s.idAllocator = id.NewAllocatorImpl(s.client, s.rootPath, s.member.MemberValue())

	s.tsoAllocator = tso.NewGlobalTSOAllocator(
		s.member.GetLeadership(),
		s.rootPath,
		s.cfg.TsoSaveInterval.Duration,
		func() time.Duration { return s.persistOptions.GetMaxResetTSGap() },
	)
	kvBase := kv.NewEtcdKVBase(s.client, s.rootPath)
	// path := filepath.Join(s.cfg.DataDir, "region-meta")

	// regionStorage, err := core.NewRegionStorage(ctx, path)
	// if err != nil {
	// 	return err
	// }
	s.storage = meta.NewStorage(kvBase)
	// s.basicCluster = core.NewBasicCluster()
	// s.cluster = cluster.NewRaftCluster(ctx, s.GetClusterRootPath(), s.clusterID, syncer.NewRegionSyncer(s), s.client, s.httpClient)
	s.hbStreams = newHeartbeatStreams(ctx, s.clusterID)

	// Run callbacks
	for _, cb := range s.startCallbacks {
		cb()
	}

	// Server has started.
	atomic.StoreInt64(&s.isServing, 1)
	return nil
}

func (s *Server) initClusterID() error {
	// Get any cluster key to parse the cluster ID.
	resp, err := etcdutil.EtcdKVGet(s.client, pdClusterIDPath)
	if err != nil {
		return err
	}

	// If no key exists, generate a random cluster ID.
	if len(resp.Kvs) == 0 {
		s.clusterID, err = initOrGetClusterID(s.client, pdClusterIDPath)
		return err
	}
	s.clusterID, err = typeutil.BytesToUint64(resp.Kvs[0].Value)
	return err
}

// AddCloseCallback adds a callback in the Close phase.
func (s *Server) AddCloseCallback(callbacks ...func()) {
	s.closeCallbacks = append(s.closeCallbacks, callbacks...)
}

// Close closes the server.
func (s *Server) Close() {
	if !atomic.CompareAndSwapInt64(&s.isServing, 1, 0) {
		// server is already closed
		return
	}

	log.Info("closing server")

	s.stopServerLoop()

	if s.client != nil {
		s.client.Close()
	}

	if s.member.Etcd() != nil {
		s.member.Close()
	}

	if s.hbStreams != nil {
		s.hbStreams.Close()
	}
	if err := s.storage.Close(); err != nil {
		log.Error("close storage meet error", zap.Error(err))
	}

	// Run callbacks
	for _, cb := range s.closeCallbacks {
		cb()
	}

	log.Info("close server")
}

// IsClosed checks whether the server is closed or not.
func (s *Server) IsClosed() bool {
	return atomic.LoadInt64(&s.isServing) == 0
}

// Run runs the pd server.
func (s *Server) Run() error {
	//go StartMonitor(s.ctx, time.Now, func() {
	//	log.Error("system time jumps backward")
	//	timeJumpBackCounter.Inc()
	//})
	if err := s.startEtcd(s.ctx); err != nil {
		return err
	}

	if err := s.startServer(s.ctx); err != nil {
		return err
	}

	s.startServerLoop(s.ctx)

	return nil
}

// Context returns the context of server.
func (s *Server) Context() context.Context {
	return s.ctx
}

// LoopContext returns the loop context of server.
func (s *Server) LoopContext() context.Context {
	return s.serverLoopCtx
}

func (s *Server) startServerLoop(ctx context.Context) {
	s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(ctx)
	s.serverLoopWg.Add(1)
	go s.etcdLeaderLoop()
}

func (s *Server) stopServerLoop() {
	s.serverLoopCancel()
	s.serverLoopWg.Wait()
}

// GetAddr returns the server urls for clients.
func (s *Server) GetAddr() string {
	return s.cfg.AdvertiseClientUrls
}

// GetClientScheme returns the client URL scheme.
func (s *Server) GetClientScheme() string {
	if len(s.cfg.Security.CertPath) == 0 && len(s.cfg.Security.KeyPath) == 0 {
		return "http"
	}
	return "https"
}

// GetClient returns the builtin etcd client.
func (s *Server) GetClient() *clientv3.Client {
	return s.client
}

// GetPersistOptions returns the schedule option.
func (s *Server) GetPersistOptions() *config.PersistOptions {
	return s.persistOptions
}

// GetStorage returns the backend storage of server.
func (s *Server) GetStorage() *meta.Storage {
	return s.storage
}

// GetHBStreams returns the heartbeat streams.
func (s *Server) GetHBStreams() HeartbeatStreams {
	return s.hbStreams
}

// GetAllocator returns the ID allocator of server.
func (s *Server) GetAllocator() *id.AllocatorImpl {
	return s.idAllocator
}

// Name returns the unique etcd Name for this server in the etcd cluster.
func (s *Server) Name() string {
||||
return s.cfg.Name
|
||||
}
|
||||
|
||||
// ClusterID returns the cluster ID of this server.
|
||||
func (s *Server) ClusterID() uint64 {
|
||||
return s.clusterID
|
||||
}
|
||||
|
||||
// StartTimestamp returns the start timestamp of this server
|
||||
func (s *Server) StartTimestamp() int64 {
|
||||
return s.startTimestamp
|
||||
}
|
||||
|
||||
// GetConfig gets the config information.
|
||||
func (s *Server) GetConfig() *config.Config {
|
||||
cfg := s.cfg.Clone()
|
||||
cfg.PDServerCfg = *s.persistOptions.GetPDServerConfig()
|
||||
storage := s.GetStorage()
|
||||
if storage == nil {
|
||||
return cfg
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// GetServerOption gets the option of the server.
|
||||
func (s *Server) GetServerOption() *config.PersistOptions {
|
||||
return s.persistOptions
|
||||
}
|
||||
|
||||
// SetLogLevel sets log level.
|
||||
func (s *Server) SetLogLevel(level string) error {
|
||||
if !isLevelLegal(level) {
|
||||
return errors.Errorf("log level %s is illegal", level)
|
||||
}
|
||||
s.cfg.Log.Level = level
|
||||
log.SetLevel(logutil.StringToZapLogLevel(level))
|
||||
log.Warn("log level changed", zap.String("level", log.GetLevel().String()))
|
||||
return nil
|
||||
}
|
||||
|
||||
func isLevelLegal(level string) bool {
|
||||
switch strings.ToLower(level) {
|
||||
case "fatal", "error", "warn", "warning", "debug", "info":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) etcdLeaderLoop() {
|
||||
defer logutil.LogPanic()
|
||||
defer s.serverLoopWg.Done()
|
||||
|
||||
ctx, cancel := context.WithCancel(s.serverLoopCtx)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
// case <-time.After(s.cfg.LeaderPriorityCheckInterval.Duration):
|
||||
// s.member.CheckPriority(ctx)
|
||||
case <-ctx.Done():
|
||||
log.Info("server is closed, exit etcd leader loop")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
130
master/tso/global_allocator.go
Normal file
@ -0,0 +1,130 @@
|
||||
// Copyright 2020 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tso
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"github.com/czs007/suvlim/master/election"
|
||||
"github.com/czs007/suvlim/pkg/pdpb"
|
||||
"github.com/czs007/suvlim/util/typeutil"
|
||||
"github.com/pingcap/log"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Allocator is a Timestamp Oracle allocator.
|
||||
type Allocator interface {
|
||||
// Initialize is used to initialize a TSO allocator.
|
||||
// It will synchronize TSO with etcd and initialize the
|
||||
// memory for later allocation work.
|
||||
Initialize() error
|
||||
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
|
||||
UpdateTSO() error
|
||||
// SetTSO sets the physical part with given tso. It's mainly used for BR restore
|
||||
// and can not forcibly set the TSO smaller than now.
|
||||
SetTSO(tso uint64) error
|
||||
// GenerateTSO is used to generate a given number of TSOs.
|
||||
// Make sure you have initialized the TSO allocator before calling.
|
||||
GenerateTSO(count uint32) (pdpb.Timestamp, error)
|
||||
// Reset is used to reset the TSO allocator.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// GlobalTSOAllocator is the global single point TSO allocator.
|
||||
type GlobalTSOAllocator struct {
|
||||
// leadership is used to check the current PD server's leadership
|
||||
// to determine whether a tso request could be processed.
|
||||
leadership *election.Leadership
|
||||
timestampOracle *timestampOracle
|
||||
}
|
||||
|
||||
// NewGlobalTSOAllocator creates a new global TSO allocator.
|
||||
func NewGlobalTSOAllocator(leadership *election.Leadership, rootPath string, saveInterval time.Duration, maxResetTSGap func() time.Duration) Allocator {
|
||||
return &GlobalTSOAllocator{
|
||||
leadership: leadership,
|
||||
timestampOracle: &timestampOracle{
|
||||
client: leadership.GetClient(),
|
||||
rootPath: rootPath,
|
||||
saveInterval: saveInterval,
|
||||
maxResetTSGap: maxResetTSGap,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize will initialize the created global TSO allocator.
|
||||
func (gta *GlobalTSOAllocator) Initialize() error {
|
||||
return gta.timestampOracle.SyncTimestamp(gta.leadership)
|
||||
}
|
||||
|
||||
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
|
||||
func (gta *GlobalTSOAllocator) UpdateTSO() error {
|
||||
return gta.timestampOracle.UpdateTimestamp(gta.leadership)
|
||||
}
|
||||
|
||||
// SetTSO sets the physical part with given tso.
|
||||
func (gta *GlobalTSOAllocator) SetTSO(tso uint64) error {
|
||||
return gta.timestampOracle.ResetUserTimestamp(gta.leadership, tso)
|
||||
}
|
||||
|
||||
// GenerateTSO is used to generate a given number of TSOs.
|
||||
// Make sure you have initialized the TSO allocator before calling.
|
||||
func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (pdpb.Timestamp, error) {
|
||||
var resp pdpb.Timestamp
|
||||
|
||||
if count == 0 {
|
||||
return resp, errors.New("tso count should be positive")
|
||||
}
|
||||
|
||||
maxRetryCount := 10
|
||||
// failpoint.Inject("skipRetryGetTS", func() {
|
||||
// maxRetryCount = 1
|
||||
// })
|
||||
|
||||
for i := 0; i < maxRetryCount; i++ {
|
||||
current := (*atomicObject)(atomic.LoadPointer(&gta.timestampOracle.TSO))
|
||||
if current == nil || current.physical == typeutil.ZeroTime {
|
||||
// If it's leader, maybe SyncTimestamp hasn't completed yet
|
||||
if gta.leadership.Check() {
|
||||
log.Info("sync hasn't completed yet, wait for a while")
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
log.Error("invalid timestamp", zap.Any("timestamp", current))
|
||||
return pdpb.Timestamp{}, errors.New("can not get timestamp, may be not leader")
|
||||
}
|
||||
|
||||
resp.Physical = current.physical.UnixNano() / int64(time.Millisecond)
|
||||
resp.Logical = atomic.AddInt64(&current.logical, int64(count))
|
||||
if resp.Logical >= maxLogical {
|
||||
log.Error("logical part outside of max logical interval, please check ntp time",
|
||||
zap.Reflect("response", resp),
|
||||
zap.Int("retry-count", i))
|
||||
time.Sleep(UpdateTimestampStep)
|
||||
continue
|
||||
}
|
||||
// In case lease expired after the first check.
|
||||
if !gta.leadership.Check() {
|
||||
return pdpb.Timestamp{}, errors.New("alloc timestamp failed, lease expired")
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
return resp, errors.New("can not get timestamp")
|
||||
}
|
||||
|
||||
// Reset is used to reset the TSO allocator.
|
||||
func (gta *GlobalTSOAllocator) Reset() {
|
||||
gta.timestampOracle.ResetTimestamp()
|
||||
}
|
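A usage sketch for the Allocator interface defined above (not part of this commit). It assumes a *election.Leadership has already been obtained after winning the etcd election; the root path and intervals are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/czs007/suvlim/master/election"
	"github.com/czs007/suvlim/master/tso"
)

// allocateOnce shows the expected order: Initialize once after becoming
// leader, then GenerateTSO for each batch of timestamps.
func allocateOnce(leadership *election.Leadership) error {
	allocator := tso.NewGlobalTSOAllocator(
		leadership,
		"/master/0",   // placeholder root path in etcd
		3*time.Second, // placeholder save interval
		func() time.Duration { return 24 * time.Hour },
	)
	if err := allocator.Initialize(); err != nil {
		return err
	}
	ts, err := allocator.GenerateTSO(1)
	if err != nil {
		return err
	}
	fmt.Printf("physical=%d logical=%d\n", ts.Physical, ts.Logical)
	return nil
}

func main() {} // a real caller would pass a leadership acquired via the election package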
216
master/tso/tso.go
Normal file
@ -0,0 +1,216 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tso
|
||||
|
||||
import (
|
||||
"path"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"github.com/czs007/suvlim/master/election"
|
||||
"github.com/czs007/suvlim/util/etcdutil"
|
||||
"github.com/czs007/suvlim/util/tsoutil"
|
||||
"github.com/czs007/suvlim/util/typeutil"
|
||||
"github.com/pingcap/log"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
// UpdateTimestampStep is used to update timestamp.
|
||||
UpdateTimestampStep = 50 * time.Millisecond
|
||||
// updateTimestampGuard is the min timestamp interval.
|
||||
updateTimestampGuard = time.Millisecond
|
||||
// maxLogical is the max upper limit for logical time.
|
||||
// When a TSO's logical time reaches this limit,
|
||||
// the physical time will be forced to increase.
|
||||
maxLogical = int64(1 << 18)
|
||||
)
|
||||
|
||||
// atomicObject is used to store the current TSO in memory.
|
||||
type atomicObject struct {
|
||||
physical time.Time
|
||||
logical int64
|
||||
}
|
||||
|
||||
// timestampOracle is used to maintain the logic of tso.
|
||||
type timestampOracle struct {
|
||||
client *clientv3.Client
|
||||
rootPath string
|
||||
// TODO: remove saveInterval
|
||||
saveInterval time.Duration
|
||||
maxResetTSGap func() time.Duration
|
||||
// For tso, set after the PD becomes a leader.
|
||||
TSO unsafe.Pointer
|
||||
lastSavedTime atomic.Value
|
||||
}
|
||||
|
||||
func (t *timestampOracle) getTimestampPath() string {
|
||||
return path.Join(t.rootPath, "timestamp")
|
||||
}
|
||||
|
||||
func (t *timestampOracle) loadTimestamp() (time.Time, error) {
|
||||
data, err := etcdutil.GetValue(t.client, t.getTimestampPath())
|
||||
if err != nil {
|
||||
return typeutil.ZeroTime, err
|
||||
}
|
||||
if len(data) == 0 {
|
||||
return typeutil.ZeroTime, nil
|
||||
}
|
||||
return typeutil.ParseTimestamp(data)
|
||||
}
|
||||
|
||||
// save timestamp, if lastTs is 0, we think the timestamp doesn't exist, so create it,
|
||||
// otherwise, update it.
|
||||
func (t *timestampOracle) saveTimestamp(leadership *election.Leadership, ts time.Time) error {
|
||||
key := t.getTimestampPath()
|
||||
data := typeutil.Uint64ToBytes(uint64(ts.UnixNano()))
|
||||
resp, err := leadership.LeaderTxn().
|
||||
Then(clientv3.OpPut(key, string(data))).
|
||||
Commit()
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if !resp.Succeeded {
|
||||
return errors.New("save timestamp failed, maybe we lost leader")
|
||||
}
|
||||
|
||||
t.lastSavedTime.Store(ts)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncTimestamp is used to synchronize the timestamp.
|
||||
func (t *timestampOracle) SyncTimestamp(leadership *election.Leadership) error {
|
||||
|
||||
last, err := t.loadTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
next := time.Now()
|
||||
|
||||
// If the current system time minus the saved etcd timestamp is less than `updateTimestampGuard`,
|
||||
// the timestamp allocation will start from the saved etcd timestamp temporarily.
|
||||
if typeutil.SubTimeByWallClock(next, last) < updateTimestampGuard {
|
||||
next = last.Add(updateTimestampGuard)
|
||||
}
|
||||
|
||||
save := next.Add(t.saveInterval)
|
||||
if err = t.saveTimestamp(leadership, save); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("sync and save timestamp", zap.Time("last", last), zap.Time("save", save), zap.Time("next", next))
|
||||
|
||||
current := &atomicObject{
|
||||
physical: next,
|
||||
}
|
||||
atomic.StorePointer(&t.TSO, unsafe.Pointer(current))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetUserTimestamp update the physical part with specified tso.
|
||||
func (t *timestampOracle) ResetUserTimestamp(leadership *election.Leadership, tso uint64) error {
|
||||
if !leadership.Check() {
|
||||
return errors.New("Setup timestamp failed, lease expired")
|
||||
}
|
||||
physical, _ := tsoutil.ParseTS(tso)
|
||||
next := physical.Add(time.Millisecond)
|
||||
prev := (*atomicObject)(atomic.LoadPointer(&t.TSO))
|
||||
|
||||
// do not update
|
||||
if typeutil.SubTimeByWallClock(next, prev.physical) <= 3*updateTimestampGuard {
|
||||
return errors.New("the specified ts too small than now")
|
||||
}
|
||||
|
||||
if typeutil.SubTimeByWallClock(next, prev.physical) >= t.maxResetTSGap() {
|
||||
return errors.New("the specified ts too large than now")
|
||||
}
|
||||
|
||||
save := next.Add(t.saveInterval)
|
||||
if err := t.saveTimestamp(leadership, save); err != nil {
|
||||
return err
|
||||
}
|
||||
update := &atomicObject{
|
||||
physical: next,
|
||||
}
|
||||
atomic.CompareAndSwapPointer(&t.TSO, unsafe.Pointer(prev), unsafe.Pointer(update))
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateTimestamp is used to update the timestamp.
|
||||
// This function will do two things:
|
||||
// 1. When the logical time is going to be used up, increase the current physical time.
|
||||
// 2. When the time window is not big enough, which means the saved etcd time minus the next physical time
|
||||
// will be less than or equal to `updateTimestampGuard`, then the time window needs to be updated and
|
||||
// we also need to save the next physical time plus `TsoSaveInterval` into etcd.
|
||||
//
|
||||
// Here are some constraints that this function must satisfy:
|
||||
// 1. The saved time is monotonically increasing.
|
||||
// 2. The physical time is monotonically increasing.
|
||||
// 3. The physical time is always less than the saved timestamp.
|
||||
func (t *timestampOracle) UpdateTimestamp(leadership *election.Leadership) error {
|
||||
prev := (*atomicObject)(atomic.LoadPointer(&t.TSO))
|
||||
now := time.Now()
|
||||
|
||||
jetLag := typeutil.SubTimeByWallClock(now, prev.physical)
|
||||
if jetLag > 3*UpdateTimestampStep {
|
||||
log.Warn("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now))
|
||||
}
|
||||
|
||||
var next time.Time
|
||||
prevLogical := atomic.LoadInt64(&prev.logical)
|
||||
// If the system time is greater, it will be synchronized with the system time.
|
||||
if jetLag > updateTimestampGuard {
|
||||
next = now
|
||||
} else if prevLogical > maxLogical/2 {
|
||||
// The reason for choosing maxLogical/2 here is that it's big enough for common cases.
|
||||
// Because enough timestamps can be allocated before the next update.
|
||||
log.Warn("the logical time may be not enough", zap.Int64("prev-logical", prevLogical))
|
||||
next = prev.physical.Add(time.Millisecond)
|
||||
} else {
|
||||
// It will still use the previous physical time to alloc the timestamp.
|
||||
return nil
|
||||
}
|
||||
|
||||
// It is not safe to increase the physical time to `next`.
|
||||
// The time window needs to be updated and saved to etcd.
|
||||
if typeutil.SubTimeByWallClock(t.lastSavedTime.Load().(time.Time), next) <= updateTimestampGuard {
|
||||
save := next.Add(t.saveInterval)
|
||||
if err := t.saveTimestamp(leadership, save); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
current := &atomicObject{
|
||||
physical: next,
|
||||
logical: 0,
|
||||
}
|
||||
|
||||
atomic.StorePointer(&t.TSO, unsafe.Pointer(current))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetTimestamp is used to reset the timestamp.
|
||||
func (t *timestampOracle) ResetTimestamp() {
|
||||
zero := &atomicObject{
|
||||
physical: typeutil.ZeroTime,
|
||||
}
|
||||
atomic.StorePointer(&t.TSO, unsafe.Pointer(zero))
|
||||
}
|
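The constants above fix the logical part of a TSO at 18 bits (maxLogical = 1 << 18). Below is a small, self-contained sketch of how a physical millisecond timestamp and a logical counter are assumed to be packed into one 64-bit TSO; the tsoutil helpers referenced above are not shown in this diff, so composeTS and parseTS here are illustrative stand-ins.

package main

import (
	"fmt"
	"time"
)

const physicalShiftBits = 18 // mirrors maxLogical = 1 << 18 above

// composeTS packs a physical millisecond timestamp and a logical counter.
func composeTS(physicalMs, logical int64) uint64 {
	return uint64(physicalMs)<<physicalShiftBits + uint64(logical)
}

// parseTS is the inverse of composeTS.
func parseTS(ts uint64) (time.Time, uint64) {
	logical := ts & ((1 << physicalShiftBits) - 1)
	physicalMs := int64(ts >> physicalShiftBits)
	return time.Unix(physicalMs/1000, (physicalMs%1000)*int64(time.Millisecond)), logical
}

func main() {
	nowMs := time.Now().UnixNano() / int64(time.Millisecond)
	ts := composeTS(nowMs, 42)
	t, logical := parseTS(ts)
	fmt.Println(t, logical) // prints the wall-clock time and the logical counter 42
}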
68
master/util.go
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"github.com/czs007/suvlim/util/etcdutil"
|
||||
"github.com/czs007/suvlim/util/typeutil"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
)
|
||||
|
||||
const (
|
||||
requestTimeout = etcdutil.DefaultRequestTimeout
|
||||
)
|
||||
|
||||
func initOrGetClusterID(c *clientv3.Client, key string) (uint64, error) {
|
||||
ctx, cancel := context.WithTimeout(c.Ctx(), requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Generate a random cluster ID.
|
||||
ts := uint64(time.Now().Unix())
|
||||
clusterID := (ts << 32) + uint64(rand.Uint32())
|
||||
value := typeutil.Uint64ToBytes(clusterID)
|
||||
|
||||
// Multiple PDs may try to init the cluster ID at the same time.
|
||||
// Only one PD can commit this transaction, then other PDs can get
|
||||
// the committed cluster ID.
|
||||
resp, err := c.Txn(ctx).
|
||||
If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
|
||||
Then(clientv3.OpPut(key, string(value))).
|
||||
Else(clientv3.OpGet(key)).
|
||||
Commit()
|
||||
if err != nil {
|
||||
return 0, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Txn commits ok, return the generated cluster ID.
|
||||
if resp.Succeeded {
|
||||
return clusterID, nil
|
||||
}
|
||||
|
||||
// Otherwise, parse the committed cluster ID.
|
||||
if len(resp.Responses) == 0 {
|
||||
return 0, errors.Errorf("txn returns empty response: %v", resp)
|
||||
}
|
||||
|
||||
response := resp.Responses[0].GetResponseRange()
|
||||
if response == nil || len(response.Kvs) != 1 {
|
||||
return 0, errors.Errorf("txn returns invalid range response: %v", resp)
|
||||
}
|
||||
|
||||
return typeutil.BytesToUint64(response.Kvs[0].Value)
|
||||
}
|
140
pkg/metapb/metapb.pb.go
Normal file
@ -0,0 +1,140 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: metapb.proto
|
||||
|
||||
/*
|
||||
Package metapb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
metapb.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Cluster
|
||||
Peer
|
||||
*/
|
||||
package metapb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type PeerRole int32
|
||||
|
||||
const (
|
||||
PeerRole_Master PeerRole = 0
|
||||
PeerRole_Reader PeerRole = 1
|
||||
PeerRole_Write PeerRole = 2
|
||||
PeerRole_Proxyer PeerRole = 3
|
||||
)
|
||||
|
||||
var PeerRole_name = map[int32]string{
|
||||
0: "Master",
|
||||
1: "Reader",
|
||||
2: "Write",
|
||||
3: "Proxyer",
|
||||
}
|
||||
var PeerRole_value = map[string]int32{
|
||||
"Master": 0,
|
||||
"Reader": 1,
|
||||
"Write": 2,
|
||||
"Proxyer": 3,
|
||||
}
|
||||
|
||||
func (x PeerRole) String() string {
|
||||
return proto.EnumName(PeerRole_name, int32(x))
|
||||
}
|
||||
func (PeerRole) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
type Cluster struct {
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
|
||||
// max peer count for a region.
|
||||
// pd will do the auto-balance if region peer count mismatches.
|
||||
MaxPeerCount uint32 `protobuf:"varint,1024,opt,name=max_peer_count,json=maxPeerCount" json:"max_peer_count,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Cluster) Reset() { *m = Cluster{} }
|
||||
func (m *Cluster) String() string { return proto.CompactTextString(m) }
|
||||
func (*Cluster) ProtoMessage() {}
|
||||
func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Cluster) GetId() uint64 {
|
||||
if m != nil {
|
||||
return m.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Cluster) GetMaxPeerCount() uint32 {
|
||||
if m != nil {
|
||||
return m.MaxPeerCount
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Peer struct {
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
|
||||
PeerId uint64 `protobuf:"varint,2,opt,name=peer_id,json=peerId" json:"peer_id,omitempty"`
|
||||
Role PeerRole `protobuf:"varint,3,opt,name=role,enum=metapb.PeerRole" json:"role,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Peer) Reset() { *m = Peer{} }
|
||||
func (m *Peer) String() string { return proto.CompactTextString(m) }
|
||||
func (*Peer) ProtoMessage() {}
|
||||
func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *Peer) GetId() uint64 {
|
||||
if m != nil {
|
||||
return m.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Peer) GetPeerId() uint64 {
|
||||
if m != nil {
|
||||
return m.PeerId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Peer) GetRole() PeerRole {
|
||||
if m != nil {
|
||||
return m.Role
|
||||
}
|
||||
return PeerRole_Master
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Cluster)(nil), "metapb.Cluster")
|
||||
proto.RegisterType((*Peer)(nil), "metapb.Peer")
|
||||
proto.RegisterEnum("metapb.PeerRole", PeerRole_name, PeerRole_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("metapb.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 210 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x8f, 0xb1, 0x4b, 0xc5, 0x30,
|
||||
0x10, 0xc6, 0x4d, 0x5e, 0x4d, 0x9f, 0xe7, 0xb3, 0x84, 0x2c, 0x66, 0x2c, 0x0f, 0x85, 0xe2, 0xd0,
|
||||
0x41, 0x37, 0x27, 0xa1, 0x93, 0x83, 0x50, 0x02, 0xe2, 0x58, 0x52, 0x73, 0x43, 0xa0, 0x35, 0xe5,
|
||||
0x4c, 0xa1, 0x6e, 0xfe, 0xe9, 0x92, 0xa8, 0xd3, 0xdb, 0xee, 0xf7, 0x3b, 0xbe, 0xe3, 0x3b, 0x38,
|
||||
0xcc, 0x18, 0xed, 0x32, 0xb6, 0x0b, 0x85, 0x18, 0x94, 0xf8, 0xa5, 0xe3, 0x13, 0x94, 0xdd, 0xb4,
|
||||
0x7e, 0x46, 0x24, 0x55, 0x01, 0xf7, 0x4e, 0xb3, 0x9a, 0x35, 0x85, 0xe1, 0xde, 0xa9, 0x5b, 0xa8,
|
||||
0x66, 0xbb, 0x0d, 0x0b, 0x22, 0x0d, 0xef, 0x61, 0xfd, 0x88, 0xfa, 0x7b, 0x5f, 0xb3, 0xe6, 0xca,
|
||||
0x1c, 0x66, 0xbb, 0xf5, 0x88, 0xd4, 0x25, 0x79, 0x7c, 0x85, 0x22, 0xc1, 0x49, 0xfc, 0x1a, 0xca,
|
||||
0x1c, 0xf5, 0x4e, 0xf3, 0x2c, 0x45, 0xc2, 0x67, 0xa7, 0x6e, 0xa0, 0xa0, 0x30, 0xa1, 0xde, 0xd5,
|
||||
0xac, 0xa9, 0xee, 0x65, 0xfb, 0xd7, 0x2b, 0x1d, 0x31, 0x61, 0x42, 0x93, 0xb7, 0x77, 0x8f, 0xb0,
|
||||
0xff, 0x37, 0x0a, 0x40, 0xbc, 0xd8, 0xd4, 0x51, 0x9e, 0xa5, 0xd9, 0xa0, 0x75, 0x48, 0x92, 0xa9,
|
||||
0x0b, 0x38, 0x7f, 0x23, 0x1f, 0x51, 0x72, 0x75, 0x09, 0x65, 0x4f, 0x61, 0xfb, 0x42, 0x92, 0xbb,
|
||||
0x51, 0xe4, 0x1f, 0x1f, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0x03, 0x47, 0x25, 0x66, 0xf3, 0x00,
|
||||
0x00, 0x00,
|
||||
}
|
610
pkg/pdpb/pdpb.pb.go
Normal file
@ -0,0 +1,610 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: pdpb.proto
|
||||
|
||||
/*
|
||||
Package pdpb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
pdpb.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Member
|
||||
RequestHeader
|
||||
ResponseHeader
|
||||
Error
|
||||
TsoRequest
|
||||
Timestamp
|
||||
TsoResponse
|
||||
AllocIDRequest
|
||||
AllocIDResponse
|
||||
HeartbeatRequest
|
||||
HeartbeatResponse
|
||||
*/
|
||||
package pdpb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import metapb "github.com/czs007/suvlim/pkg/metapb"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
grpc "google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type ErrorType int32
|
||||
|
||||
const (
|
||||
ErrorType_OK ErrorType = 0
|
||||
ErrorType_UNKNOWN ErrorType = 1
|
||||
ErrorType_NOT_BOOTSTRAPPED ErrorType = 2
|
||||
ErrorType_STORE_TOMBSTONE ErrorType = 3
|
||||
ErrorType_ALREADY_BOOTSTRAPPED ErrorType = 4
|
||||
ErrorType_INCOMPATIBLE_VERSION ErrorType = 5
|
||||
ErrorType_REGION_NOT_FOUND ErrorType = 6
|
||||
)
|
||||
|
||||
var ErrorType_name = map[int32]string{
|
||||
0: "OK",
|
||||
1: "UNKNOWN",
|
||||
2: "NOT_BOOTSTRAPPED",
|
||||
3: "STORE_TOMBSTONE",
|
||||
4: "ALREADY_BOOTSTRAPPED",
|
||||
5: "INCOMPATIBLE_VERSION",
|
||||
6: "REGION_NOT_FOUND",
|
||||
}
|
||||
var ErrorType_value = map[string]int32{
|
||||
"OK": 0,
|
||||
"UNKNOWN": 1,
|
||||
"NOT_BOOTSTRAPPED": 2,
|
||||
"STORE_TOMBSTONE": 3,
|
||||
"ALREADY_BOOTSTRAPPED": 4,
|
||||
"INCOMPATIBLE_VERSION": 5,
|
||||
"REGION_NOT_FOUND": 6,
|
||||
}
|
||||
|
||||
func (x ErrorType) String() string {
|
||||
return proto.EnumName(ErrorType_name, int32(x))
|
||||
}
|
||||
func (ErrorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
type Member struct {
|
||||
// name is the name of the PD member.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
// member_id is the unique id of the PD member.
|
||||
MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId" json:"member_id,omitempty"`
|
||||
PeerUrls []string `protobuf:"bytes,3,rep,name=peer_urls,json=peerUrls" json:"peer_urls,omitempty"`
|
||||
ClientUrls []string `protobuf:"bytes,4,rep,name=client_urls,json=clientUrls" json:"client_urls,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Member) Reset() { *m = Member{} }
|
||||
func (m *Member) String() string { return proto.CompactTextString(m) }
|
||||
func (*Member) ProtoMessage() {}
|
||||
func (*Member) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *Member) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Member) GetMemberId() uint64 {
|
||||
if m != nil {
|
||||
return m.MemberId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Member) GetPeerUrls() []string {
|
||||
if m != nil {
|
||||
return m.PeerUrls
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Member) GetClientUrls() []string {
|
||||
if m != nil {
|
||||
return m.ClientUrls
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type RequestHeader struct {
|
||||
// cluster_id is the ID of the cluster which be sent to.
|
||||
ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *RequestHeader) Reset() { *m = RequestHeader{} }
|
||||
func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestHeader) ProtoMessage() {}
|
||||
func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *RequestHeader) GetClusterId() uint64 {
|
||||
if m != nil {
|
||||
return m.ClusterId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ResponseHeader struct {
|
||||
// cluster_id is the ID of the cluster which sent the response.
|
||||
ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"`
|
||||
Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
|
||||
func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
|
||||
func (*ResponseHeader) ProtoMessage() {}
|
||||
func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *ResponseHeader) GetClusterId() uint64 {
|
||||
if m != nil {
|
||||
return m.ClusterId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ResponseHeader) GetError() *Error {
|
||||
if m != nil {
|
||||
return m.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Error struct {
|
||||
Type ErrorType `protobuf:"varint,1,opt,name=type,enum=pdpb.ErrorType" json:"type,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Error) Reset() { *m = Error{} }
|
||||
func (m *Error) String() string { return proto.CompactTextString(m) }
|
||||
func (*Error) ProtoMessage() {}
|
||||
func (*Error) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func (m *Error) GetType() ErrorType {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return ErrorType_OK
|
||||
}
|
||||
|
||||
func (m *Error) GetMessage() string {
|
||||
if m != nil {
|
||||
return m.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type TsoRequest struct {
|
||||
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
||||
Count uint32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
|
||||
}
|
||||
|
||||
func (m *TsoRequest) Reset() { *m = TsoRequest{} }
|
||||
func (m *TsoRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*TsoRequest) ProtoMessage() {}
|
||||
func (*TsoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
|
||||
func (m *TsoRequest) GetHeader() *RequestHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *TsoRequest) GetCount() uint32 {
|
||||
if m != nil {
|
||||
return m.Count
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Timestamp struct {
|
||||
Physical int64 `protobuf:"varint,1,opt,name=physical" json:"physical,omitempty"`
|
||||
Logical int64 `protobuf:"varint,2,opt,name=logical" json:"logical,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
|
||||
func (m *Timestamp) GetPhysical() int64 {
|
||||
if m != nil {
|
||||
return m.Physical
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetLogical() int64 {
|
||||
if m != nil {
|
||||
return m.Logical
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type TsoResponse struct {
|
||||
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
||||
Count uint32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
|
||||
Timestamp *Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
|
||||
}
|
||||
|
||||
func (m *TsoResponse) Reset() { *m = TsoResponse{} }
|
||||
func (m *TsoResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*TsoResponse) ProtoMessage() {}
|
||||
func (*TsoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
|
||||
func (m *TsoResponse) GetHeader() *ResponseHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *TsoResponse) GetCount() uint32 {
|
||||
if m != nil {
|
||||
return m.Count
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *TsoResponse) GetTimestamp() *Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type AllocIDRequest struct {
|
||||
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
||||
}
|
||||
|
||||
func (m *AllocIDRequest) Reset() { *m = AllocIDRequest{} }
|
||||
func (m *AllocIDRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*AllocIDRequest) ProtoMessage() {}
|
||||
func (*AllocIDRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
|
||||
func (m *AllocIDRequest) GetHeader() *RequestHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type AllocIDResponse struct {
|
||||
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
||||
Id uint64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *AllocIDResponse) Reset() { *m = AllocIDResponse{} }
|
||||
func (m *AllocIDResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*AllocIDResponse) ProtoMessage() {}
|
||||
func (*AllocIDResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
||||
|
||||
func (m *AllocIDResponse) GetHeader() *ResponseHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *AllocIDResponse) GetId() uint64 {
|
||||
if m != nil {
|
||||
return m.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type HeartbeatRequest struct {
|
||||
Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
||||
Peer *metapb.Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HeartbeatRequest) Reset() { *m = HeartbeatRequest{} }
|
||||
func (m *HeartbeatRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*HeartbeatRequest) ProtoMessage() {}
|
||||
func (*HeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
|
||||
|
||||
func (m *HeartbeatRequest) GetHeader() *RequestHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *HeartbeatRequest) GetPeer() *metapb.Peer {
|
||||
if m != nil {
|
||||
return m.Peer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type HeartbeatResponse struct {
|
||||
Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
|
||||
}
|
||||
|
||||
func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} }
|
||||
func (m *HeartbeatResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*HeartbeatResponse) ProtoMessage() {}
|
||||
func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
|
||||
|
||||
func (m *HeartbeatResponse) GetHeader() *ResponseHeader {
|
||||
if m != nil {
|
||||
return m.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Member)(nil), "pdpb.Member")
|
||||
proto.RegisterType((*RequestHeader)(nil), "pdpb.RequestHeader")
|
||||
proto.RegisterType((*ResponseHeader)(nil), "pdpb.ResponseHeader")
|
||||
proto.RegisterType((*Error)(nil), "pdpb.Error")
|
||||
proto.RegisterType((*TsoRequest)(nil), "pdpb.TsoRequest")
|
||||
proto.RegisterType((*Timestamp)(nil), "pdpb.Timestamp")
|
||||
proto.RegisterType((*TsoResponse)(nil), "pdpb.TsoResponse")
|
||||
proto.RegisterType((*AllocIDRequest)(nil), "pdpb.AllocIDRequest")
|
||||
proto.RegisterType((*AllocIDResponse)(nil), "pdpb.AllocIDResponse")
|
||||
proto.RegisterType((*HeartbeatRequest)(nil), "pdpb.HeartbeatRequest")
|
||||
proto.RegisterType((*HeartbeatResponse)(nil), "pdpb.HeartbeatResponse")
|
||||
proto.RegisterEnum("pdpb.ErrorType", ErrorType_name, ErrorType_value)
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for PD service
|
||||
|
||||
type PDClient interface {
|
||||
Tso(ctx context.Context, opts ...grpc.CallOption) (PD_TsoClient, error)
|
||||
AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error)
|
||||
Heartbeat(ctx context.Context, opts ...grpc.CallOption) (PD_HeartbeatClient, error)
|
||||
}
|
||||
|
||||
type pDClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewPDClient(cc *grpc.ClientConn) PDClient {
|
||||
return &pDClient{cc}
|
||||
}
|
||||
|
||||
func (c *pDClient) Tso(ctx context.Context, opts ...grpc.CallOption) (PD_TsoClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_PD_serviceDesc.Streams[0], c.cc, "/pdpb.PD/Tso", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &pDTsoClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type PD_TsoClient interface {
|
||||
Send(*TsoRequest) error
|
||||
Recv() (*TsoResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type pDTsoClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *pDTsoClient) Send(m *TsoRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *pDTsoClient) Recv() (*TsoResponse, error) {
|
||||
m := new(TsoResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *pDClient) AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error) {
|
||||
out := new(AllocIDResponse)
|
||||
err := grpc.Invoke(ctx, "/pdpb.PD/AllocID", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *pDClient) Heartbeat(ctx context.Context, opts ...grpc.CallOption) (PD_HeartbeatClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_PD_serviceDesc.Streams[1], c.cc, "/pdpb.PD/Heartbeat", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &pDHeartbeatClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type PD_HeartbeatClient interface {
|
||||
Send(*HeartbeatRequest) error
|
||||
Recv() (*HeartbeatResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type pDHeartbeatClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *pDHeartbeatClient) Send(m *HeartbeatRequest) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *pDHeartbeatClient) Recv() (*HeartbeatResponse, error) {
|
||||
m := new(HeartbeatResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// Server API for PD service
|
||||
|
||||
type PDServer interface {
|
||||
Tso(PD_TsoServer) error
|
||||
AllocID(context.Context, *AllocIDRequest) (*AllocIDResponse, error)
|
||||
Heartbeat(PD_HeartbeatServer) error
|
||||
}
|
||||
|
||||
func RegisterPDServer(s *grpc.Server, srv PDServer) {
|
||||
s.RegisterService(&_PD_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _PD_Tso_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(PDServer).Tso(&pDTsoServer{stream})
|
||||
}
|
||||
|
||||
type PD_TsoServer interface {
|
||||
Send(*TsoResponse) error
|
||||
Recv() (*TsoRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type pDTsoServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *pDTsoServer) Send(m *TsoResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *pDTsoServer) Recv() (*TsoRequest, error) {
|
||||
m := new(TsoRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func _PD_AllocID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AllocIDRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(PDServer).AllocID(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pdpb.PD/AllocID",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(PDServer).AllocID(ctx, req.(*AllocIDRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _PD_Heartbeat_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(PDServer).Heartbeat(&pDHeartbeatServer{stream})
|
||||
}
|
||||
|
||||
type PD_HeartbeatServer interface {
|
||||
Send(*HeartbeatResponse) error
|
||||
Recv() (*HeartbeatRequest, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type pDHeartbeatServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *pDHeartbeatServer) Send(m *HeartbeatResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *pDHeartbeatServer) Recv() (*HeartbeatRequest, error) {
|
||||
m := new(HeartbeatRequest)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
var _PD_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "pdpb.PD",
|
||||
HandlerType: (*PDServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "AllocID",
|
||||
Handler: _PD_AllocID_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Tso",
|
||||
Handler: _PD_Tso_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "Heartbeat",
|
||||
Handler: _PD_Heartbeat_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "pdpb.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("pdpb.proto", fileDescriptor0) }
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 603 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0xad, 0x1d, 0x27, 0xad, 0x27, 0x6d, 0xea, 0x4e, 0x03, 0x44, 0x41, 0x88, 0xb0, 0x5c, 0x22,
|
||||
0x3e, 0xa2, 0x2a, 0x5c, 0xb8, 0x70, 0x70, 0x88, 0x4b, 0xad, 0xb6, 0xde, 0x68, 0xe3, 0x82, 0x38,
|
||||
0x45, 0x4e, 0xbc, 0x6a, 0x23, 0xd9, 0xb1, 0xf1, 0x3a, 0x42, 0xbd, 0xf1, 0x27, 0xf8, 0x27, 0xfc,
|
||||
0x40, 0xe4, 0xf5, 0x07, 0x69, 0x90, 0x10, 0xca, 0xcd, 0xf3, 0xde, 0xec, 0x9b, 0x37, 0x1f, 0x09,
|
||||
0x40, 0xec, 0xc7, 0xf3, 0x41, 0x9c, 0x44, 0x69, 0x84, 0x5a, 0xf6, 0xdd, 0x3d, 0x0c, 0x79, 0xea,
|
||||
0x95, 0x18, 0xf9, 0x0e, 0x8d, 0x6b, 0x1e, 0xce, 0x79, 0x82, 0x08, 0xda, 0xca, 0x0b, 0x79, 0x47,
|
||||
0xe9, 0x29, 0x7d, 0x9d, 0xc9, 0x6f, 0x7c, 0x0a, 0x7a, 0x28, 0xd9, 0xd9, 0xd2, 0xef, 0xa8, 0x3d,
|
||||
0xa5, 0xaf, 0xb1, 0x83, 0x1c, 0xb0, 0xfd, 0x8c, 0x8c, 0x39, 0x4f, 0x66, 0xeb, 0x24, 0x10, 0x9d,
|
||||
0x5a, 0xaf, 0xd6, 0xd7, 0xd9, 0x41, 0x06, 0xdc, 0x24, 0x81, 0xc0, 0xe7, 0xd0, 0x5c, 0x04, 0x4b,
|
||||
0xbe, 0x4a, 0x73, 0x5a, 0x93, 0x34, 0xe4, 0x50, 0x96, 0x40, 0x06, 0x70, 0xc4, 0xf8, 0xb7, 0x35,
|
||||
0x17, 0xe9, 0x05, 0xf7, 0x7c, 0x9e, 0xe0, 0x33, 0x80, 0x45, 0xb0, 0x16, 0x69, 0x5e, 0x4c, 0x91,
|
||||
0xc5, 0xf4, 0x02, 0xb1, 0x7d, 0xc2, 0xa0, 0xc5, 0xb8, 0x88, 0xa3, 0x95, 0xe0, 0xff, 0xf5, 0x00,
|
||||
0x5f, 0x40, 0x9d, 0x27, 0x49, 0x94, 0x48, 0xdf, 0xcd, 0x61, 0x73, 0x20, 0x27, 0x61, 0x65, 0x10,
|
||||
0xcb, 0x19, 0x72, 0x0e, 0x75, 0x19, 0xe3, 0x4b, 0xd0, 0xd2, 0xfb, 0x38, 0xef, 0xbd, 0x35, 0x3c,
|
||||
0xde, 0x48, 0x75, 0xef, 0x63, 0xce, 0x24, 0x89, 0x1d, 0xd8, 0x0f, 0xb9, 0x10, 0xde, 0x2d, 0x97,
|
||||
0x92, 0x3a, 0x2b, 0x43, 0x42, 0x01, 0x5c, 0x11, 0x15, 0xed, 0xe0, 0x6b, 0x68, 0xdc, 0x49, 0x87,
|
||||
0x52, 0xae, 0x39, 0x3c, 0xcd, 0xe5, 0x1e, 0x74, 0xcb, 0x8a, 0x14, 0x6c, 0x43, 0x7d, 0x11, 0xad,
|
||||
0x57, 0xa9, 0x94, 0x3c, 0x62, 0x79, 0x40, 0x4c, 0xd0, 0xdd, 0x65, 0xc8, 0x45, 0xea, 0x85, 0x31,
|
||||
0x76, 0xe1, 0x20, 0xbe, 0xbb, 0x17, 0xcb, 0x85, 0x17, 0x48, 0xc5, 0x1a, 0xab, 0xe2, 0xcc, 0x53,
|
||||
0x10, 0xdd, 0x4a, 0x4a, 0x95, 0x54, 0x19, 0x92, 0x1f, 0x0a, 0x34, 0xa5, 0xa9, 0x7c, 0x66, 0xf8,
|
||||
0x66, 0xcb, 0x55, 0xbb, 0x74, 0xb5, 0x39, 0xd3, 0x7f, 0xdb, 0xc2, 0xb7, 0xa0, 0xa7, 0xa5, 0xad,
|
||||
0x4e, 0x4d, 0xca, 0x14, 0xb3, 0xaa, 0xdc, 0xb2, 0x3f, 0x19, 0xe4, 0x03, 0xb4, 0xcc, 0x20, 0x88,
|
||||
0x16, 0xf6, 0x78, 0x97, 0xd1, 0x10, 0x0a, 0xc7, 0xd5, 0xf3, 0x9d, 0x9a, 0x68, 0x81, 0x5a, 0x9d,
|
||||
0xad, 0xba, 0xf4, 0x89, 0x07, 0xc6, 0x05, 0xf7, 0x92, 0x74, 0xce, 0xbd, 0x74, 0xa7, 0x65, 0xf5,
|
||||
0x40, 0xcb, 0x0e, 0xbc, 0xb8, 0xa8, 0xc3, 0x41, 0xf1, 0x4b, 0x9a, 0x70, 0x9e, 0x30, 0xc9, 0x10,
|
||||
0x13, 0x4e, 0x36, 0x4a, 0xec, 0xe2, 0xfa, 0xd5, 0x4f, 0x05, 0xf4, 0xea, 0xf4, 0xb0, 0x01, 0x2a,
|
||||
0xbd, 0x34, 0xf6, 0xb0, 0x09, 0xfb, 0x37, 0xce, 0xa5, 0x43, 0xbf, 0x38, 0x86, 0x82, 0x6d, 0x30,
|
||||
0x1c, 0xea, 0xce, 0x46, 0x94, 0xba, 0x53, 0x97, 0x99, 0x93, 0x89, 0x35, 0x36, 0x54, 0x3c, 0x85,
|
||||
0xe3, 0xa9, 0x4b, 0x99, 0x35, 0x73, 0xe9, 0xf5, 0x68, 0xea, 0x52, 0xc7, 0x32, 0x6a, 0xd8, 0x81,
|
||||
0xb6, 0x79, 0xc5, 0x2c, 0x73, 0xfc, 0xf5, 0x61, 0xba, 0x96, 0x31, 0xb6, 0xf3, 0x91, 0x5e, 0x4f,
|
||||
0x4c, 0xd7, 0x1e, 0x5d, 0x59, 0xb3, 0xcf, 0x16, 0x9b, 0xda, 0xd4, 0x31, 0xea, 0x99, 0x3c, 0xb3,
|
||||
0x3e, 0xd9, 0xd4, 0x99, 0x65, 0x55, 0xce, 0xe9, 0x8d, 0x33, 0x36, 0x1a, 0xc3, 0x5f, 0x0a, 0xa8,
|
||||
0x93, 0x31, 0x9e, 0x41, 0xcd, 0x15, 0x11, 0x1a, 0xc5, 0xde, 0xab, 0xb3, 0xef, 0x9e, 0x6c, 0x20,
|
||||
0x79, 0x63, 0x64, 0xaf, 0xaf, 0x9c, 0x29, 0xf8, 0x1e, 0xf6, 0x8b, 0x3d, 0x62, 0xd1, 0xf9, 0xc3,
|
||||
0xab, 0xe8, 0x3e, 0xda, 0x42, 0xcb, 0xd7, 0x38, 0x02, 0xbd, 0x9a, 0x26, 0x3e, 0xce, 0xb3, 0xb6,
|
||||
0x37, 0xd8, 0x7d, 0xf2, 0x17, 0xbe, 0x59, 0x7d, 0xde, 0x90, 0xff, 0x73, 0xef, 0x7e, 0x07, 0x00,
|
||||
0x00, 0xff, 0xff, 0x34, 0x73, 0xe9, 0xc4, 0x09, 0x05, 0x00, 0x00,
|
||||
}
|
29
proto/metapb.proto
Normal file
@ -0,0 +1,29 @@
syntax = "proto3";
package metapb;


message Cluster {
    uint64 id = 1;
    // max peer count for a region.
    // pd will do the auto-balance if region peer count mismatches.
    uint32 max_peer_count = 1024;
    // more attributes......
}

enum PeerRole {

    Master = 0;

    Reader = 1;

    Write = 2;

    Proxyer = 3;

}

message Peer {
    uint64 id = 1;
    uint64 peer_id = 2;
    PeerRole role = 3;
}
|
82
proto/pdpb.proto
Normal file
@ -0,0 +1,82 @@
syntax = "proto3";
package pdpb;

import "metapb.proto";

service PD {
    rpc Tso(stream TsoRequest) returns (stream TsoResponse) {}
    rpc AllocID(AllocIDRequest) returns (AllocIDResponse) {}
    rpc Heartbeat(stream HeartbeatRequest) returns (stream HeartbeatResponse) {}
}

message Member {
    // name is the name of the PD member.
    string name = 1;
    // member_id is the unique id of the PD member.
    uint64 member_id = 2;
    repeated string peer_urls = 3;
    repeated string client_urls = 4;
}

message RequestHeader {
    // cluster_id is the ID of the cluster which be sent to.
    uint64 cluster_id = 1;
}

message ResponseHeader {
    // cluster_id is the ID of the cluster which sent the response.
    uint64 cluster_id = 1;
    Error error = 2;
}

enum ErrorType {
    OK = 0;
    UNKNOWN = 1;
    NOT_BOOTSTRAPPED = 2;
    STORE_TOMBSTONE = 3;
    ALREADY_BOOTSTRAPPED = 4;
    INCOMPATIBLE_VERSION = 5;
    REGION_NOT_FOUND = 6;
}

message Error {
    ErrorType type = 1;
    string message = 2;
}

message TsoRequest {
    RequestHeader header = 1;

    uint32 count = 2;
}

message Timestamp {
    int64 physical = 1;
    int64 logical = 2;
}

message TsoResponse {
    ResponseHeader header = 1;

    uint32 count = 2;
    Timestamp timestamp = 3;
}

message AllocIDRequest {
    RequestHeader header = 1;
}

message AllocIDResponse {
    ResponseHeader header = 1;
    uint64 id = 2;
}

message HeartbeatRequest {
    RequestHeader header = 1;
    metapb.Peer peer = 2;

}

message HeartbeatResponse {
    ResponseHeader header = 1;
}
|
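To show how the Tso stream defined above is consumed, here is a hedged client-side sketch against the generated pdpb package. The endpoint and cluster id are placeholders, and error handling is reduced to panics for brevity.

package main

import (
	"context"
	"fmt"

	"github.com/czs007/suvlim/pkg/pdpb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:53100", grpc.WithInsecure()) // placeholder address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := pdpb.NewPDClient(conn)
	stream, err := client.Tso(context.Background())
	if err != nil {
		panic(err)
	}
	// Request a batch of 10 timestamps over the bidirectional stream.
	req := &pdpb.TsoRequest{
		Header: &pdpb.RequestHeader{ClusterId: 0}, // placeholder cluster id
		Count:  10,
	}
	if err := stream.Send(req); err != nil {
		panic(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		panic(err)
	}
	ts := resp.GetTimestamp()
	fmt.Println(ts.GetPhysical(), ts.GetLogical())
}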
@ -1,64 +1,60 @@
|
||||
package pulsar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"suvlim/pulsar/schema"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
// import (
|
||||
// "fmt"
|
||||
// "suvlim/pulsar/schema"
|
||||
// "sync"
|
||||
// "time"
|
||||
// )
|
||||
|
||||
type QueryNode struct {
|
||||
mc MessageClient
|
||||
}
|
||||
// type QueryNode struct {
|
||||
// mc MessageClient
|
||||
// }
|
||||
|
||||
func (qn *QueryNode)doQueryNode(wg sync.WaitGroup) {
|
||||
wg.Add(3)
|
||||
go qn.insert_query(qn.mc.InsertMsg, wg)
|
||||
go qn.delete_query(qn.mc.DeleteMsg, wg)
|
||||
<<<<<<< HEAD
|
||||
go qn.search_query(qn.mc.searchMsg, wg)
|
||||
=======
|
||||
go qn.search_query(qn.mc.SearchMsg, wg)
|
||||
>>>>>>> 1ab497232c9c1179499c456a250dd6e73a3259b2
|
||||
wg.Wait()
|
||||
}
|
||||
// func (qn *QueryNode)doQueryNode(wg sync.WaitGroup) {
|
||||
// wg.Add(3)
|
||||
// go qn.insert_query(qn.mc.InsertMsg, wg)
|
||||
// go qn.delete_query(qn.mc.DeleteMsg, wg)
|
||||
// go qn.search_query(qn.mc.SearchMsg, wg)
|
||||
// wg.Wait()
|
||||
// }
|
||||
|
||||
|
||||
func (qn *QueryNode) PrepareBatchMsg() {
|
||||
qn.mc.PrepareBatchMsg(JobType(0))
|
||||
}
|
||||
func main() {
|
||||
// func (qn *QueryNode) PrepareBatchMsg() {
|
||||
// qn.mc.PrepareBatchMsg(JobType(0))
|
||||
// }
|
||||
// func main() {
|
||||
|
||||
mc := MessageClient{}
|
||||
topics := []string{"insert", "delete"}
|
||||
mc.InitClient("pulsar://localhost:6650", topics)
|
||||
// mc := MessageClient{}
|
||||
// topics := []string{"insert", "delete"}
|
||||
// mc.InitClient("pulsar://localhost:6650", topics)
|
||||
|
||||
go mc.ReceiveMessage()
|
||||
// go mc.ReceiveMessage()
|
||||
|
||||
qn := QueryNode{mc}
|
||||
// qn := QueryNode{mc}
|
||||
|
||||
for {
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
qn.PrepareBatchMsg()
|
||||
qn.doQueryNode(wg)
|
||||
fmt.Println("do a batch in 200ms")
|
||||
}
|
||||
}
|
||||
// for {
|
||||
// time.Sleep(200 * time.Millisecond)
|
||||
// qn.PrepareBatchMsg()
|
||||
// qn.doQueryNode(wg)
|
||||
// fmt.Println("do a batch in 200ms")
|
||||
// }
|
||||
// }
|
||||
|
||||
func (qn *QueryNode) insert_query(data []*schema.InsertMsg, wg sync.WaitGroup) schema.Status{
|
||||
wg.Done()
|
||||
return schema.Status{schema.ErrorCode_SUCCESS, ""}
|
||||
}
|
||||
// func (qn *QueryNode) insert_query(data []*schema.InsertMsg, wg sync.WaitGroup) schema.Status{
|
||||
// wg.Done()
|
||||
// return schema.Status{schema.ErrorCode_SUCCESS, ""}
|
||||
// }
|
||||
|
||||
func (qn *QueryNode) delete_query(data []*schema.DeleteMsg, wg sync.WaitGroup) schema.Status{
|
||||
wg.Done()
|
||||
return schema.Status{schema.ErrorCode_SUCCESS, ""}
|
||||
}
|
||||
// func (qn *QueryNode) delete_query(data []*schema.DeleteMsg, wg sync.WaitGroup) schema.Status{
|
||||
// wg.Done()
|
||||
// return schema.Status{schema.ErrorCode_SUCCESS, ""}
|
||||
// }
|
||||
|
||||
func (qn *QueryNode) search_query(data []*schema.SearchMsg, wg sync.WaitGroup) schema.Status{
|
||||
wg.Done()
|
||||
return schema.Status{schema.ErrorCode_SUCCESS, ""}
|
||||
}
|
||||
// func (qn *QueryNode) search_query(data []*schema.SearchMsg, wg sync.WaitGroup) schema.Status{
|
||||
// wg.Done()
|
||||
// return schema.Status{schema.ErrorCode_SUCCESS, ""}
|
||||
// }
|
||||
|
||||
|
||||
|
||||
|
@ -101,14 +101,14 @@ const (
|
||||
|
||||
type PulsarMessage struct {
|
||||
CollectionName string
|
||||
Fields []*FieldValue
|
||||
EntityId int64
|
||||
PartitionTag string
|
||||
VectorParam *VectorParam
|
||||
Segments []*SegmentRecord
|
||||
Timestamp int64
|
||||
ClientId int64
|
||||
MsgType OpType
|
||||
Fields []*FieldValue
|
||||
EntityId int64
|
||||
PartitionTag string
|
||||
VectorParam *VectorParam
|
||||
Segments []*SegmentRecord
|
||||
Timestamp int64
|
||||
ClientId int64
|
||||
MsgType OpType
|
||||
}
|
||||
|
||||
type Message interface {
|
||||
@ -122,6 +122,7 @@ type InsertMsg struct {
|
||||
Fields []*FieldValue
|
||||
EntityId int64
|
||||
PartitionTag string
|
||||
SegmentId uint64
|
||||
Timestamp uint64
|
||||
ClientId int64
|
||||
MsgType OpType
|
||||
@ -189,5 +190,5 @@ func (kms *Key2SegMsg) GetType() OpType {
|
||||
}
|
||||
|
||||
type SyncEofMsg struct {
|
||||
MsgType OpType
|
||||
}
|
||||
MsgType OpType
|
||||
}
|
||||
|
@@ -1,5 +1,14 @@
package reader

/*

#cgo CFLAGS: -I../core/include

#cgo LDFLAGS: -L../core/lib -lmilvus_dog_segment -Wl,-rpath=../core/lib

#include "partition_c.h"

*/
import "C"
import (
    "errors"
@@ -11,29 +20,20 @@ type Collection struct {
    Partitions []*Partition
}

func (c *Collection) NewPartition(partitionName string) (*Partition, error) {
func (c *Collection) NewPartition(partitionName string) *Partition {
    cName := C.CString(partitionName)
    partitionPtr, status := C.NewPartition(c.CollectionPtr, cName)

    if status != 0 {
        return nil, errors.New("create partition failed")
    }
    partitionPtr := C.NewPartition(c.CollectionPtr, cName)

    var newPartition = &Partition{PartitionPtr: partitionPtr, PartitionName: partitionName}
    c.Partitions = append(c.Partitions, newPartition)
    return newPartition, nil
    return newPartition
}

func (c *Collection) DeletePartition(partitionName string) error {
    cName := C.CString(partitionName)
    status := C.DeletePartition(c.CollectionPtr, cName)

    if status != 0 {
        return errors.New("create partition failed")
    }
func (c *Collection) DeletePartition(partition *Partition) {
    cPtr := partition.PartitionPtr
    C.DeletePartition(cPtr)

    // TODO: remove from c.Partitions
    return nil
}

func (c *Collection) GetSegments() ([]*Segment, error) {
|
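The wrappers above hand raw C pointers across the cgo boundary and free them explicitly. Below is a self-contained sketch of that create/delete ownership pattern; the C side here is an inline stand-in, not the real collection_c.h / partition_c.h API, and the names are illustrative.

package main

/*
#include <stdlib.h>

// Stand-in for the real C API: an opaque handle that the Go side owns until it
// explicitly deletes it, mirroring NewPartition/DeletePartition above.
typedef void* CHandle;

static CHandle NewHandle() { return malloc(16); }
static void DeleteHandle(CHandle h) { free(h); }
*/
import "C"

import "fmt"

// Handle keeps the raw C pointer behind a Go type, the same way Collection and
// Partition above keep CollectionPtr and PartitionPtr.
type Handle struct {
    ptr C.CHandle
}

func NewHandle() *Handle {
    return &Handle{ptr: C.NewHandle()}
}

func (h *Handle) Delete() {
    C.DeleteHandle(h.ptr)
    h.ptr = nil
}

func main() {
    h := NewHandle()
    fmt.Println("created handle")
    h.Delete()
    fmt.Println("deleted handle")
}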
@@ -1,6 +1,6 @@
package reader

import "suvlim/pulsar/schema"
import "github.com/czs007/suvlim/pulsar/schema"

type IndexConfig struct {}

@@ -1,7 +1,6 @@
package reader

import "C"
import "errors"

type Partition struct {
    PartitionPtr *C.CPartition
@@ -9,25 +8,17 @@ type Partition struct {
    Segments []*Segment
}

func (p *Partition) NewSegment(segmentId uint64) (*Segment, error) {
    segmentPtr, status := C.NewSegment(p.PartitionPtr, segmentId)

    if status != 0 {
        return nil, errors.New("create segment failed")
    }
func (p *Partition) NewSegment(segmentId uint64) *Segment {
    segmentPtr := C.NewSegment(p.PartitionPtr, segmentId)

    var newSegment = &Segment{SegmentPtr: segmentPtr, SegmentId: segmentId}
    p.Segments = append(p.Segments, newSegment)
    return newSegment, nil
    return newSegment
}

func (p *Partition) DeleteSegment() error {
    status := C.DeleteSegment(p.PartitionPtr)

    if status != 0 {
        return errors.New("delete segment failed")
    }
func (p *Partition) DeleteSegment(segment *Segment) {
    cPtr := segment.SegmentPtr
    C.DeleteSegment(cPtr)

    // TODO: remove from p.Segments
    return nil
}
@@ -4,8 +4,8 @@ import "C"
import (
    "errors"
    "fmt"
    "suvlim/pulsar"
    "suvlim/pulsar/schema"
    "github.com/czs007/suvlim/pulsar"
    "github.com/czs007/suvlim/pulsar/schema"
    "sync"
    "time"
)
@@ -51,30 +51,22 @@ func NewQueryNode(timeSync uint64) *QueryNode {
// TODO: Schema
type CollectionSchema string

func (node *QueryNode) NewCollection(collectionName string, schema CollectionSchema) (*Collection, error) {
func (node *QueryNode) NewCollection(collectionName string, schema CollectionSchema) *Collection {
    cName := C.CString(collectionName)
    cSchema := C.CString(schema)
    collection, status := C.NewCollection(cName, cSchema)

    if status != 0 {
        return nil, errors.New("create collection failed")
    }
    collection := C.NewCollection(cName, cSchema)

    var newCollection = &Collection{CollectionPtr: collection, CollectionName: collectionName}
    node.Collections = append(node.Collections, newCollection)

    return newCollection, nil
    return newCollection
}

func (node *QueryNode) DeleteCollection(collection *Collection) error {
    status := C.DeleteCollection(collection.CollectionPtr)

    if status != 0 {
        return errors.New("delete collection failed")
    }
func (node *QueryNode) DeleteCollection(collection *Collection) {
    cPtr := collection.CollectionPtr
    C.DeleteCollection(cPtr)

    // TODO: remove from node.Collections
    return nil
}

////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -141,10 +133,10 @@ func (node *QueryNode) GetTimeSync() uint64 {
func (node *QueryNode) InitQueryNodeCollection() {
    // TODO: remove hard code, add collection creation request
    // TODO: error handle
    var newCollection, _ = node.NewCollection("collection1", "fakeSchema")
    var newPartition, _ = newCollection.NewPartition("partition1")
    var newCollection = node.NewCollection("collection1", "fakeSchema")
    var newPartition = newCollection.NewPartition("partition1")
    // TODO: add segment id
    var _, _ = newPartition.NewSegment(0)
    var _ = newPartition.NewSegment(0)
}

func (node *QueryNode) SegmentsManagement() {
@@ -156,7 +148,7 @@ func (node *QueryNode) SegmentsManagement() {
    if timeSync >= segment.SegmentCloseTime {
        segment.Close()
        // TODO: add atomic segment id
        var newSegment, _ = partition.NewSegment(0)
        var newSegment = partition.NewSegment(0)
        newSegment.SegmentCloseTime = timeSync + SegmentLifetime
        partition.Segments = append(partition.Segments, newSegment)
    }
|
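SegmentsManagement above closes a segment once the time sync passes its close time and immediately opens a replacement. The following stripped-down, self-contained sketch shows that rollover policy; fakeSegment and rollover are hypothetical names, not types from the reader package.

package main

import "fmt"

const segmentLifetime = 20000 // same lifetime value the reader uses

// fakeSegment is a hypothetical stand-in for the reader's Segment type.
type fakeSegment struct {
    id        uint64
    closeTime uint64
}

// rollover closes every segment whose close time has passed and opens a fresh
// one in its place, mirroring the loop body in SegmentsManagement above.
func rollover(segments []*fakeSegment, timeSync uint64, nextID uint64) []*fakeSegment {
    var out []*fakeSegment
    for _, s := range segments {
        if timeSync >= s.closeTime {
            fmt.Printf("closing segment %d\n", s.id)
            out = append(out, &fakeSegment{id: nextID, closeTime: timeSync + segmentLifetime})
            nextID++
            continue
        }
        out = append(out, s)
    }
    return out
}

func main() {
    segs := []*fakeSegment{{id: 0, closeTime: 10000}}
    segs = rollover(segs, 15000, 1)
    fmt.Println("open segments:", len(segs))
}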
@@ -2,7 +2,7 @@ package reader

import (
    "fmt"
    "suvlim/pulsar/schema"
    "github.com/czs007/suvlim/pulsar/schema"
)

type ResultEntityIds []int64

@@ -11,14 +11,14 @@ package reader
*/
import "C"
import (
    "suvlim/pulsar/schema"
    "github.com/czs007/suvlim/pulsar/schema"
)

const SegmentLifetime = 20000

type Segment struct {
    SegmentPtr *C.SegmentBase
    SegmentId int32
    SegmentId uint64
    SegmentCloseTime uint64
}
17 reader/segment_test.go Normal file
@@ -0,0 +1,17 @@
package reader

import (
    //"github.com/realistschuckle/testify/assert"
    "testing"
)

func TestConstructorAndDestructor(t *testing.T) {
    node := NewQueryNode(0)
    var collection = node.NewCollection("collection0", "fake schema")
    var partition = collection.NewPartition("partition0")
    var segment = partition.NewSegment(0)

    partition.DeleteSegment(segment)
    collection.DeletePartition(partition)
    node.DeleteCollection(collection)
}
@@ -1,10 +0,0 @@
package main

import (
    "fmt"
    "testing"
)

func TestIntMinBasic(t *testing.T) {
    fmt.Println("Hello go testing")
}
157 util/etcdutil/etcdutil.go Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pingcap/errors"
|
||||
"github.com/pingcap/log"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.etcd.io/etcd/etcdserver"
|
||||
"go.etcd.io/etcd/pkg/types"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDialTimeout is the maximum amount of time a dial will wait for a
|
||||
// connection to setup. 30s is long enough for most of the network conditions.
|
||||
DefaultDialTimeout = 30 * time.Second
|
||||
|
||||
// DefaultRequestTimeout 10s is long enough for most of etcd clusters.
|
||||
DefaultRequestTimeout = 10 * time.Second
|
||||
|
||||
// DefaultSlowRequestTime is the threshold for a normal request; anything
// longer than 1s is considered a slow request.
|
||||
DefaultSlowRequestTime = 1 * time.Second
|
||||
)
|
||||
|
||||
// CheckClusterID checks Etcd's cluster ID and returns an error on mismatch.
// This function never blocks, even if quorum is not satisfied.
|
||||
func CheckClusterID(localClusterID types.ID, um types.URLsMap, tlsConfig *tls.Config) error {
|
||||
if len(um) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var peerURLs []string
|
||||
for _, urls := range um {
|
||||
peerURLs = append(peerURLs, urls.StringSlice()...)
|
||||
}
|
||||
|
||||
for _, u := range peerURLs {
|
||||
trp := &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
remoteCluster, gerr := etcdserver.GetClusterFromRemotePeers(nil, []string{u}, trp)
|
||||
trp.CloseIdleConnections()
|
||||
if gerr != nil {
|
||||
// Do not return error, because other members may be not ready.
|
||||
log.Error("failed to get cluster from remote")
|
||||
continue
|
||||
}
|
||||
|
||||
remoteClusterID := remoteCluster.ID()
|
||||
if remoteClusterID != localClusterID {
|
||||
return errors.Errorf("Etcd cluster ID mismatch, expect %d, got %d", localClusterID, remoteClusterID)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddEtcdMember adds an etcd member.
|
||||
func AddEtcdMember(client *clientv3.Client, urls []string) (*clientv3.MemberAddResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(client.Ctx(), DefaultRequestTimeout)
|
||||
addResp, err := client.MemberAdd(ctx, urls)
|
||||
cancel()
|
||||
return addResp, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// ListEtcdMembers returns a list of internal etcd members.
|
||||
func ListEtcdMembers(client *clientv3.Client) (*clientv3.MemberListResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(client.Ctx(), DefaultRequestTimeout)
|
||||
listResp, err := client.MemberList(ctx)
|
||||
cancel()
|
||||
return listResp, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// RemoveEtcdMember removes a member by the given id.
|
||||
func RemoveEtcdMember(client *clientv3.Client, id uint64) (*clientv3.MemberRemoveResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(client.Ctx(), DefaultRequestTimeout)
|
||||
rmResp, err := client.MemberRemove(ctx, id)
|
||||
cancel()
|
||||
return rmResp, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// EtcdKVGet returns the etcd GetResponse by given key or key prefix
|
||||
func EtcdKVGet(c *clientv3.Client, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(c.Ctx(), DefaultRequestTimeout)
|
||||
defer cancel()
|
||||
|
||||
start := time.Now()
|
||||
resp, err := clientv3.NewKV(c).Get(ctx, key, opts...)
|
||||
if err != nil {
|
||||
log.Error("load from etcd meet error")
|
||||
}
|
||||
if cost := time.Since(start); cost > DefaultSlowRequestTime {
|
||||
log.Warn("kv gets too slow", zap.String("request-key", key), zap.Duration("cost", cost), zap.Error(err))
|
||||
}
|
||||
|
||||
return resp, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// GetValue gets value with key from etcd.
|
||||
func GetValue(c *clientv3.Client, key string, opts ...clientv3.OpOption) ([]byte, error) {
|
||||
resp, err := get(c, key, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return resp.Kvs[0].Value, nil
|
||||
}
|
||||
|
||||
func get(c *clientv3.Client, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
|
||||
resp, err := EtcdKVGet(c, key, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n := len(resp.Kvs); n == 0 {
|
||||
return nil, nil
|
||||
} else if n > 1 {
|
||||
return nil, errors.Errorf("invalid get value resp %v, must only one", resp.Kvs)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetProtoMsgWithModRev returns boolean to indicate whether the key exists or not.
|
||||
func GetProtoMsgWithModRev(c *clientv3.Client, key string, msg proto.Message, opts ...clientv3.OpOption) (bool, int64, error) {
|
||||
resp, err := get(c, key, opts...)
|
||||
if err != nil {
|
||||
return false, 0, err
|
||||
}
|
||||
if resp == nil {
|
||||
return false, 0, nil
|
||||
}
|
||||
value := resp.Kvs[0].Value
|
||||
if err = proto.Unmarshal(value, msg); err != nil {
|
||||
return false, 0, errors.WithStack(err)
|
||||
}
|
||||
return true, resp.Kvs[0].ModRevision, nil
|
||||
}
|
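A hedged usage sketch for the helpers above. The import path github.com/czs007/suvlim/util/etcdutil and the endpoint 127.0.0.1:2379 are assumptions; point the client at whatever etcd instance is available.

package main

import (
    "fmt"
    "log"

    "go.etcd.io/etcd/clientv3"

    // Assumed import path for the etcdutil package added above.
    "github.com/czs007/suvlim/util/etcdutil"
)

func main() {
    // The endpoint is an assumption; point it at a running etcd instance.
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   []string{"127.0.0.1:2379"},
        DialTimeout: etcdutil.DefaultDialTimeout,
    })
    if err != nil {
        log.Fatal(err)
    }
    defer cli.Close()

    // GetValue wraps EtcdKVGet and returns at most one value for the key.
    val, err := etcdutil.GetValue(cli, "test/key1")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("value: %s\n", val)
}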
189 util/etcdutil/etcdutil_test.go Normal file
@@ -0,0 +1,189 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
. "github.com/pingcap/check"
|
||||
"github.com/tikv/pd/pkg/tempurl"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.etcd.io/etcd/embed"
|
||||
"go.etcd.io/etcd/pkg/types"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
TestingT(t)
|
||||
}
|
||||
|
||||
var _ = Suite(&testEtcdutilSuite{})
|
||||
|
||||
type testEtcdutilSuite struct{}
|
||||
|
||||
func newTestSingleConfig() *embed.Config {
|
||||
cfg := embed.NewConfig()
|
||||
cfg.Name = "test_etcd"
|
||||
cfg.Dir, _ = ioutil.TempDir("/tmp", "test_etcd")
|
||||
cfg.WalDir = ""
|
||||
cfg.Logger = "zap"
|
||||
cfg.LogOutputs = []string{"stdout"}
|
||||
|
||||
pu, _ := url.Parse(tempurl.Alloc())
|
||||
cfg.LPUrls = []url.URL{*pu}
|
||||
cfg.APUrls = cfg.LPUrls
|
||||
cu, _ := url.Parse(tempurl.Alloc())
|
||||
cfg.LCUrls = []url.URL{*cu}
|
||||
cfg.ACUrls = cfg.LCUrls
|
||||
|
||||
cfg.StrictReconfigCheck = false
|
||||
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, &cfg.LPUrls[0])
|
||||
cfg.ClusterState = embed.ClusterStateFlagNew
|
||||
return cfg
|
||||
}
|
||||
|
||||
func cleanConfig(cfg *embed.Config) {
|
||||
// Clean data directory
|
||||
os.RemoveAll(cfg.Dir)
|
||||
}
|
||||
|
||||
func (s *testEtcdutilSuite) TestMemberHelpers(c *C) {
|
||||
cfg1 := newTestSingleConfig()
|
||||
etcd1, err := embed.StartEtcd(cfg1)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
ep1 := cfg1.LCUrls[0].String()
|
||||
client1, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{ep1},
|
||||
})
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
<-etcd1.Server.ReadyNotify()
|
||||
|
||||
// Test ListEtcdMembers
|
||||
listResp1, err := ListEtcdMembers(client1)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(len(listResp1.Members), Equals, 1)
|
||||
// types.ID is an alias of uint64.
|
||||
c.Assert(listResp1.Members[0].ID, Equals, uint64(etcd1.Server.ID()))
|
||||
|
||||
// Test AddEtcdMember
|
||||
// Make a new etcd config.
|
||||
cfg2 := newTestSingleConfig()
|
||||
cfg2.Name = "etcd2"
|
||||
cfg2.InitialCluster = cfg1.InitialCluster + fmt.Sprintf(",%s=%s", cfg2.Name, &cfg2.LPUrls[0])
|
||||
cfg2.ClusterState = embed.ClusterStateFlagExisting
|
||||
|
||||
// Add it to the cluster above.
|
||||
peerURL := cfg2.LPUrls[0].String()
|
||||
addResp, err := AddEtcdMember(client1, []string{peerURL})
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
etcd2, err := embed.StartEtcd(cfg2)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(addResp.Member.ID, Equals, uint64(etcd2.Server.ID()))
|
||||
|
||||
ep2 := cfg2.LCUrls[0].String()
|
||||
client2, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{ep2},
|
||||
})
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
<-etcd2.Server.ReadyNotify()
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
listResp2, err := ListEtcdMembers(client2)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(len(listResp2.Members), Equals, 2)
|
||||
for _, m := range listResp2.Members {
|
||||
switch m.ID {
|
||||
case uint64(etcd1.Server.ID()):
|
||||
case uint64(etcd2.Server.ID()):
|
||||
default:
|
||||
c.Fatalf("unknown member: %v", m)
|
||||
}
|
||||
}
|
||||
|
||||
// Test CheckClusterID
|
||||
urlmap, err := types.NewURLsMap(cfg2.InitialCluster)
|
||||
c.Assert(err, IsNil)
|
||||
err = CheckClusterID(etcd1.Server.Cluster().ID(), urlmap, &tls.Config{})
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
// Test RemoveEtcdMember
|
||||
_, err = RemoveEtcdMember(client1, uint64(etcd2.Server.ID()))
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
listResp3, err := ListEtcdMembers(client1)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(len(listResp3.Members), Equals, 1)
|
||||
c.Assert(listResp3.Members[0].ID, Equals, uint64(etcd1.Server.ID()))
|
||||
|
||||
etcd1.Close()
|
||||
etcd2.Close()
|
||||
cleanConfig(cfg1)
|
||||
cleanConfig(cfg2)
|
||||
}
|
||||
|
||||
func (s *testEtcdutilSuite) TestEtcdKVGet(c *C) {
|
||||
cfg := newTestSingleConfig()
|
||||
etcd, err := embed.StartEtcd(cfg)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
ep := cfg.LCUrls[0].String()
|
||||
client, err := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{ep},
|
||||
})
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
<-etcd.Server.ReadyNotify()
|
||||
|
||||
keys := []string{"test/key1", "test/key2", "test/key3", "test/key4", "test/key5"}
|
||||
vals := []string{"val1", "val2", "val3", "val4", "val5"}
|
||||
|
||||
kv := clientv3.NewKV(client)
|
||||
for i := range keys {
|
||||
_, err = kv.Put(context.TODO(), keys[i], vals[i])
|
||||
c.Assert(err, IsNil)
|
||||
}
|
||||
|
||||
// Test simple point get
|
||||
resp, err := EtcdKVGet(client, "test/key1")
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(resp.Kvs[0].Value), Equals, "val1")
|
||||
|
||||
// Test range get
|
||||
withRange := clientv3.WithRange("test/zzzz")
|
||||
withLimit := clientv3.WithLimit(3)
|
||||
resp, err = EtcdKVGet(client, "test/", withRange, withLimit, clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend))
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(len(resp.Kvs), Equals, 3)
|
||||
|
||||
for i := range resp.Kvs {
|
||||
c.Assert(string(resp.Kvs[i].Key), Equals, keys[i])
|
||||
c.Assert(string(resp.Kvs[i].Value), Equals, vals[i])
|
||||
}
|
||||
|
||||
lastKey := string(resp.Kvs[len(resp.Kvs)-1].Key)
|
||||
next := clientv3.GetPrefixRangeEnd(lastKey)
|
||||
resp, err = EtcdKVGet(client, next, withRange, withLimit, clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend))
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(len(resp.Kvs), Equals, 2)
|
||||
cleanConfig(cfg)
|
||||
}
|
103 util/grpcutil/grpcutil.go Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright 2019 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"net/url"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"go.etcd.io/etcd/pkg/transport"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// SecurityConfig is the configuration for supporting tls.
|
||||
type SecurityConfig struct {
|
||||
// CAPath is the path of the file that contains the list of trusted SSL CAs. If set, the following settings must not be empty.
|
||||
CAPath string `toml:"cacert-path" json:"cacert-path"`
|
||||
// CertPath is the path of file that contains X509 certificate in PEM format.
|
||||
CertPath string `toml:"cert-path" json:"cert-path"`
|
||||
// KeyPath is the path of file that contains X509 key in PEM format.
|
||||
KeyPath string `toml:"key-path" json:"key-path"`
|
||||
// CertAllowedCN is a CN which must be provided by a client
|
||||
CertAllowedCN []string `toml:"cert-allowed-cn" json:"cert-allowed-cn"`
|
||||
}
|
||||
|
||||
// ToTLSConfig generates tls config.
|
||||
func (s SecurityConfig) ToTLSConfig() (*tls.Config, error) {
|
||||
if len(s.CertPath) == 0 && len(s.KeyPath) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
allowedCN, err := s.GetOneAllowedCN()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tlsInfo := transport.TLSInfo{
|
||||
CertFile: s.CertPath,
|
||||
KeyFile: s.KeyPath,
|
||||
TrustedCAFile: s.CAPath,
|
||||
AllowedCN: allowedCN,
|
||||
}
|
||||
|
||||
tlsConfig, err := tlsInfo.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return tlsConfig, nil
|
||||
}
|
||||
|
||||
// GetOneAllowedCN returns the first allowed CN only.
|
||||
func (s SecurityConfig) GetOneAllowedCN() (string, error) {
|
||||
switch len(s.CertAllowedCN) {
|
||||
case 1:
|
||||
return s.CertAllowedCN[0], nil
|
||||
case 0:
|
||||
return "", nil
|
||||
default:
|
||||
return "", errors.New("Currently only supports one CN")
|
||||
}
|
||||
}
|
||||
|
||||
// GetClientConn returns a gRPC client connection.
|
||||
// It creates a client connection to the given target. By default, it's
|
||||
// a non-blocking dial (the function won't wait for connections to be
|
||||
// established, and connecting happens in the background). To make it a blocking
|
||||
// dial, use WithBlock() dial option.
|
||||
//
|
||||
// In the non-blocking case, the ctx does not act against the connection. It
|
||||
// only controls the setup steps.
|
||||
//
|
||||
// In the blocking case, ctx can be used to cancel or expire the pending
|
||||
// connection. Once this function returns, the cancellation and expiration of
|
||||
// ctx will be noop. Users should call ClientConn.Close to terminate all the
|
||||
// pending operations after this function returns.
|
||||
func GetClientConn(ctx context.Context, addr string, tlsCfg *tls.Config, do ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
opt := grpc.WithInsecure()
|
||||
if tlsCfg != nil {
|
||||
creds := credentials.NewTLS(tlsCfg)
|
||||
opt = grpc.WithTransportCredentials(creds)
|
||||
}
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
cc, err := grpc.DialContext(ctx, u.Host, append(do, opt)...)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return cc, nil
|
||||
}
|
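A hedged usage sketch of the two helpers above. The import path and the address are assumptions; with empty cert/key paths the dial is insecure.

package main

import (
    "context"
    "log"

    // Assumed import path for the grpcutil package added above.
    "github.com/czs007/suvlim/util/grpcutil"
)

func main() {
    // With empty cert/key paths, ToTLSConfig returns a nil config, so
    // GetClientConn falls back to an insecure dial.
    sec := grpcutil.SecurityConfig{}
    tlsCfg, err := sec.ToTLSConfig()
    if err != nil {
        log.Fatal(err)
    }

    // The address is an assumption; GetClientConn parses it as a URL and dials its host.
    conn, err := grpcutil.GetClientConn(context.Background(), "http://127.0.0.1:6500", tlsCfg)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    log.Println("connection state:", conn.GetState())
}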
294 util/logutil/log.go Normal file
@@ -0,0 +1,294 @@
|
||||
// Copyright 2017 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"github.com/pingcap/errors"
|
||||
zaplog "github.com/pingcap/log"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.etcd.io/etcd/raft"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
lumberjack "gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLogTimeFormat = "2006/01/02 15:04:05.000"
|
||||
defaultLogMaxSize = 300 // MB
|
||||
defaultLogFormat = "text"
|
||||
defaultLogLevel = log.InfoLevel
|
||||
)
|
||||
|
||||
// FileLogConfig serializes file log related config in toml/json.
|
||||
type FileLogConfig struct {
|
||||
// Log filename, leave empty to disable file log.
|
||||
Filename string `toml:"filename" json:"filename"`
|
||||
// Max size for a single file, in MB.
|
||||
MaxSize int `toml:"max-size" json:"max-size"`
|
||||
// Max log keep days, default is never deleting.
|
||||
MaxDays int `toml:"max-days" json:"max-days"`
|
||||
// Maximum number of old log files to retain.
|
||||
MaxBackups int `toml:"max-backups" json:"max-backups"`
|
||||
}
|
||||
|
||||
// LogConfig serializes log related config in toml/json.
|
||||
type LogConfig struct {
|
||||
// Log level.
|
||||
Level string `toml:"level" json:"level"`
|
||||
// Log format. one of json, text, or console.
|
||||
Format string `toml:"format" json:"format"`
|
||||
// Disable automatic timestamps in output.
|
||||
DisableTimestamp bool `toml:"disable-timestamp" json:"disable-timestamp"`
|
||||
// File log config.
|
||||
File FileLogConfig `toml:"file" json:"file"`
|
||||
}
|
||||
|
||||
// redirectFormatter will redirect etcd logs to logrus logs.
|
||||
type redirectFormatter struct{}
|
||||
|
||||
// Format implements capnslog.Formatter hook.
|
||||
func (rf *redirectFormatter) Format(pkg string, level capnslog.LogLevel, depth int, entries ...interface{}) {
|
||||
if pkg != "" {
|
||||
pkg = fmt.Sprint(pkg, ": ")
|
||||
}
|
||||
|
||||
logStr := fmt.Sprint(pkg, entries)
|
||||
|
||||
switch level {
|
||||
case capnslog.CRITICAL:
|
||||
log.Fatalf(logStr)
|
||||
case capnslog.ERROR:
|
||||
log.Errorf(logStr)
|
||||
case capnslog.WARNING:
|
||||
log.Warningf(logStr)
|
||||
case capnslog.NOTICE:
|
||||
log.Infof(logStr)
|
||||
case capnslog.INFO:
|
||||
log.Infof(logStr)
|
||||
case capnslog.DEBUG, capnslog.TRACE:
|
||||
log.Debugf(logStr)
|
||||
}
|
||||
}
|
||||
|
||||
// Flush only for implementing Formatter.
|
||||
func (rf *redirectFormatter) Flush() {}
|
||||
|
||||
// isSkippedPackageName tests whether the path name is on the log library calling stack.
|
||||
func isSkippedPackageName(name string) bool {
|
||||
return strings.Contains(name, "github.com/sirupsen/logrus") ||
|
||||
strings.Contains(name, "github.com/coreos/pkg/capnslog")
|
||||
}
|
||||
|
||||
// contextHook injects the file name and line position into each log entry.
|
||||
type contextHook struct{}
|
||||
|
||||
// Fire implements logrus.Hook interface
|
||||
// https://github.com/sirupsen/logrus/issues/63
|
||||
func (hook *contextHook) Fire(entry *log.Entry) error {
|
||||
pc := make([]uintptr, 4)
|
||||
cnt := runtime.Callers(6, pc)
|
||||
|
||||
for i := 0; i < cnt; i++ {
|
||||
fu := runtime.FuncForPC(pc[i] - 1)
|
||||
name := fu.Name()
|
||||
if !isSkippedPackageName(name) {
|
||||
file, line := fu.FileLine(pc[i] - 1)
|
||||
entry.Data["file"] = path.Base(file)
|
||||
entry.Data["line"] = line
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Levels implements logrus.Hook interface.
|
||||
func (hook *contextHook) Levels() []log.Level {
|
||||
return log.AllLevels
|
||||
}
|
||||
|
||||
// StringToLogLevel translates log level string to log level.
|
||||
func StringToLogLevel(level string) log.Level {
|
||||
switch strings.ToLower(level) {
|
||||
case "fatal":
|
||||
return log.FatalLevel
|
||||
case "error":
|
||||
return log.ErrorLevel
|
||||
case "warn", "warning":
|
||||
return log.WarnLevel
|
||||
case "debug":
|
||||
return log.DebugLevel
|
||||
case "info":
|
||||
return log.InfoLevel
|
||||
}
|
||||
return defaultLogLevel
|
||||
}
|
||||
|
||||
// StringToZapLogLevel translates log level string to log level.
|
||||
func StringToZapLogLevel(level string) zapcore.Level {
|
||||
switch strings.ToLower(level) {
|
||||
case "fatal":
|
||||
return zapcore.FatalLevel
|
||||
case "error":
|
||||
return zapcore.ErrorLevel
|
||||
case "warn", "warning":
|
||||
return zapcore.WarnLevel
|
||||
case "debug":
|
||||
return zapcore.DebugLevel
|
||||
case "info":
|
||||
return zapcore.InfoLevel
|
||||
}
|
||||
return zapcore.InfoLevel
|
||||
}
|
||||
|
||||
// textFormatter is for compatibility with ngaut/log
|
||||
type textFormatter struct {
|
||||
DisableTimestamp bool
|
||||
}
|
||||
|
||||
// Format implements logrus.Formatter
|
||||
func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) {
|
||||
var b *bytes.Buffer
|
||||
if entry.Buffer != nil {
|
||||
b = entry.Buffer
|
||||
} else {
|
||||
b = &bytes.Buffer{}
|
||||
}
|
||||
if !f.DisableTimestamp {
|
||||
fmt.Fprintf(b, "%s ", entry.Time.Format(defaultLogTimeFormat))
|
||||
}
|
||||
if file, ok := entry.Data["file"]; ok {
|
||||
fmt.Fprintf(b, "%s:%v:", file, entry.Data["line"])
|
||||
}
|
||||
fmt.Fprintf(b, " [%s] %s", entry.Level.String(), entry.Message)
|
||||
for k, v := range entry.Data {
|
||||
if k != "file" && k != "line" {
|
||||
fmt.Fprintf(b, " %v=%v", k, v)
|
||||
}
|
||||
}
|
||||
b.WriteByte('\n')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// StringToLogFormatter uses the different log formatter according to a given format name.
|
||||
func StringToLogFormatter(format string, disableTimestamp bool) log.Formatter {
|
||||
switch strings.ToLower(format) {
|
||||
case "text":
|
||||
return &textFormatter{
|
||||
DisableTimestamp: disableTimestamp,
|
||||
}
|
||||
case "json":
|
||||
return &log.JSONFormatter{
|
||||
TimestampFormat: defaultLogTimeFormat,
|
||||
DisableTimestamp: disableTimestamp,
|
||||
}
|
||||
case "console":
|
||||
return &log.TextFormatter{
|
||||
FullTimestamp: true,
|
||||
TimestampFormat: defaultLogTimeFormat,
|
||||
DisableTimestamp: disableTimestamp,
|
||||
}
|
||||
default:
|
||||
return &textFormatter{}
|
||||
}
|
||||
}
|
||||
|
||||
// InitFileLog initializes file based logging options.
|
||||
func InitFileLog(cfg *zaplog.FileLogConfig) error {
|
||||
if st, err := os.Stat(cfg.Filename); err == nil {
|
||||
if st.IsDir() {
|
||||
return errors.New("can't use directory as log file name")
|
||||
}
|
||||
}
|
||||
if cfg.MaxSize == 0 {
|
||||
cfg.MaxSize = defaultLogMaxSize
|
||||
}
|
||||
|
||||
// use lumberjack to logrotate
|
||||
output := &lumberjack.Logger{
|
||||
Filename: cfg.Filename,
|
||||
MaxSize: cfg.MaxSize,
|
||||
MaxBackups: cfg.MaxBackups,
|
||||
MaxAge: cfg.MaxDays,
|
||||
LocalTime: true,
|
||||
}
|
||||
|
||||
log.SetOutput(output)
|
||||
return nil
|
||||
}
|
||||
|
||||
type wrapLogrus struct {
|
||||
*log.Logger
|
||||
}
|
||||
|
||||
// V provides the functionality that returns whether a particular log level is at
|
||||
// least l - this is needed to meet the LoggerV2 interface. GRPC's logging levels
|
||||
// are: https://github.com/grpc/grpc-go/blob/master/grpclog/loggerv2.go#L71
|
||||
// 0=info, 1=warning, 2=error, 3=fatal
|
||||
// logrus's are: https://github.com/sirupsen/logrus/blob/master/logrus.go
|
||||
// 0=panic, 1=fatal, 2=error, 3=warn, 4=info, 5=debug
|
||||
func (lg *wrapLogrus) V(l int) bool {
|
||||
// translate to logrus level
|
||||
logrusLevel := 4 - l
|
||||
return int(lg.Logger.Level) <= logrusLevel
|
||||
}
|
||||
|
||||
var once sync.Once
|
||||
|
||||
// InitLogger initializes PD's logger.
|
||||
func InitLogger(cfg *zaplog.Config) error {
|
||||
var err error
|
||||
|
||||
once.Do(func() {
|
||||
log.SetLevel(StringToLogLevel(cfg.Level))
|
||||
log.AddHook(&contextHook{})
|
||||
|
||||
if cfg.Format == "" {
|
||||
cfg.Format = defaultLogFormat
|
||||
}
|
||||
log.SetFormatter(StringToLogFormatter(cfg.Format, cfg.DisableTimestamp))
|
||||
|
||||
// etcd log
|
||||
capnslog.SetFormatter(&redirectFormatter{})
|
||||
// grpc log
|
||||
lg := &wrapLogrus{log.StandardLogger()}
|
||||
grpclog.SetLoggerV2(lg)
|
||||
// raft log
|
||||
raft.SetLogger(lg)
|
||||
|
||||
if len(cfg.File.Filename) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
err = InitFileLog(&cfg.File)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// LogPanic logs the panic reason and stack, then exit the process.
|
||||
// Commonly used with a `defer`.
|
||||
func LogPanic() {
|
||||
if e := recover(); e != nil {
|
||||
zaplog.Fatal("panic", zap.Reflect("recover", e))
|
||||
}
|
||||
}
|
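A hedged usage sketch of InitLogger and LogPanic above. The import path is an assumption; leaving File.Filename empty skips the lumberjack file output.

package main

import (
    zaplog "github.com/pingcap/log"
    log "github.com/sirupsen/logrus"

    // Assumed import path for the logutil package added above.
    "github.com/czs007/suvlim/util/logutil"
)

func main() {
    // Log any panic on the way out.
    defer logutil.LogPanic()

    // Level and format strings follow StringToLogLevel / StringToLogFormatter above.
    cfg := &zaplog.Config{Level: "info", Format: "text"}
    if err := logutil.InitLogger(cfg); err != nil {
        panic(err)
    }

    log.Info("logger initialized")
}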
111 util/logutil/log_test.go Normal file
@@ -0,0 +1,111 @@
|
||||
// Copyright 2017 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
. "github.com/pingcap/check"
|
||||
zaplog "github.com/pingcap/log"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
const (
|
||||
logPattern = `\d\d\d\d/\d\d/\d\d \d\d:\d\d:\d\d\.\d\d\d ([\w_%!$@.,+~-]+|\\.)+:\d+: \[(fatal|error|warning|info|debug)\] .*?\n`
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
TestingT(t)
|
||||
}
|
||||
|
||||
var _ = Suite(&testLogSuite{})
|
||||
|
||||
type testLogSuite struct {
|
||||
buf *bytes.Buffer
|
||||
}
|
||||
|
||||
func (s *testLogSuite) SetUpSuite(c *C) {
|
||||
s.buf = &bytes.Buffer{}
|
||||
}
|
||||
|
||||
func (s *testLogSuite) TestStringToLogLevel(c *C) {
|
||||
c.Assert(StringToLogLevel("fatal"), Equals, log.FatalLevel)
|
||||
c.Assert(StringToLogLevel("ERROR"), Equals, log.ErrorLevel)
|
||||
c.Assert(StringToLogLevel("warn"), Equals, log.WarnLevel)
|
||||
c.Assert(StringToLogLevel("warning"), Equals, log.WarnLevel)
|
||||
c.Assert(StringToLogLevel("debug"), Equals, log.DebugLevel)
|
||||
c.Assert(StringToLogLevel("info"), Equals, log.InfoLevel)
|
||||
c.Assert(StringToLogLevel("whatever"), Equals, log.InfoLevel)
|
||||
}
|
||||
|
||||
func (s *testLogSuite) TestStringToZapLogLevel(c *C) {
|
||||
c.Assert(StringToZapLogLevel("fatal"), Equals, zapcore.FatalLevel)
|
||||
c.Assert(StringToZapLogLevel("ERROR"), Equals, zapcore.ErrorLevel)
|
||||
c.Assert(StringToZapLogLevel("warn"), Equals, zapcore.WarnLevel)
|
||||
c.Assert(StringToZapLogLevel("warning"), Equals, zapcore.WarnLevel)
|
||||
c.Assert(StringToZapLogLevel("debug"), Equals, zapcore.DebugLevel)
|
||||
c.Assert(StringToZapLogLevel("info"), Equals, zapcore.InfoLevel)
|
||||
c.Assert(StringToZapLogLevel("whatever"), Equals, zapcore.InfoLevel)
|
||||
}
|
||||
|
||||
func (s *testLogSuite) TestStringToLogFormatter(c *C) {
|
||||
c.Assert(StringToLogFormatter("text", true), DeepEquals, &textFormatter{
|
||||
DisableTimestamp: true,
|
||||
})
|
||||
c.Assert(StringToLogFormatter("json", true), DeepEquals, &log.JSONFormatter{
|
||||
DisableTimestamp: true,
|
||||
TimestampFormat: defaultLogTimeFormat,
|
||||
})
|
||||
c.Assert(StringToLogFormatter("console", true), DeepEquals, &log.TextFormatter{
|
||||
DisableTimestamp: true,
|
||||
FullTimestamp: true,
|
||||
TimestampFormat: defaultLogTimeFormat,
|
||||
})
|
||||
c.Assert(StringToLogFormatter("", true), DeepEquals, &textFormatter{})
|
||||
}
|
||||
|
||||
// TestLogging assure log format and log redirection works.
|
||||
func (s *testLogSuite) TestLogging(c *C) {
|
||||
conf := &zaplog.Config{Level: "warn", File: zaplog.FileLogConfig{}}
|
||||
c.Assert(InitLogger(conf), IsNil)
|
||||
|
||||
log.SetOutput(s.buf)
|
||||
|
||||
tlog := capnslog.NewPackageLogger("github.com/tikv/pd/pkg/logutil", "test")
|
||||
|
||||
tlog.Infof("[this message should not be sent to buf]")
|
||||
c.Assert(s.buf.Len(), Equals, 0)
|
||||
|
||||
tlog.Warningf("[this message should be sent to buf]")
|
||||
entry, err := s.buf.ReadString('\n')
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(entry, Matches, logPattern)
|
||||
// All capnslog logs are triggered in logutil/log.go
|
||||
c.Assert(strings.Contains(entry, "log.go"), IsTrue)
|
||||
|
||||
log.Warnf("this message comes from logrus")
|
||||
entry, err = s.buf.ReadString('\n')
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(entry, Matches, logPattern)
|
||||
c.Assert(strings.Contains(entry, "log_test.go"), IsTrue)
|
||||
}
|
||||
|
||||
func (s *testLogSuite) TestFileLog(c *C) {
|
||||
c.Assert(InitFileLog(&zaplog.FileLogConfig{Filename: "/tmp"}), NotNil)
|
||||
c.Assert(InitFileLog(&zaplog.FileLogConfig{Filename: "/tmp/test_file_log", MaxSize: 0}), IsNil)
|
||||
}
|
41 util/tsoutil/tso.go Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2019 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tsoutil
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/czs007/suvlim/pkg/pdpb"
|
||||
)
|
||||
|
||||
const (
|
||||
physicalShiftBits = 18
|
||||
logicalBits = (1 << physicalShiftBits) - 1
|
||||
)
|
||||
|
||||
// ParseTS parses the ts to (physical,logical).
|
||||
func ParseTS(ts uint64) (time.Time, uint64) {
|
||||
logical := ts & logicalBits
|
||||
physical := ts >> physicalShiftBits
|
||||
physicalTime := time.Unix(int64(physical/1000), int64(physical)%1000*time.Millisecond.Nanoseconds())
|
||||
return physicalTime, logical
|
||||
}
|
||||
|
||||
// ParseTimestamp parses pdpb.Timestamp to time.Time
|
||||
func ParseTimestamp(ts pdpb.Timestamp) (time.Time, uint64) {
|
||||
logical := uint64(ts.Logical)
|
||||
physical := ts.Physical
|
||||
physicalTime := time.Unix(int64(physical/1000), int64(physical)%1000*time.Millisecond.Nanoseconds())
|
||||
return physicalTime, logical
|
||||
}
|
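A small, self-contained worked example of the TSO layout ParseTS decodes above: physical milliseconds in the high bits, an 18-bit logical counter in the low bits.

package main

import (
    "fmt"
    "time"
)

const physicalShiftBits = 18 // same layout as ParseTS above

func main() {
    // Compose a TSO by hand: physical milliseconds in the high bits,
    // a logical counter in the low 18 bits.
    physical := uint64(time.Now().UnixNano() / int64(time.Millisecond))
    logical := uint64(42)
    ts := physical<<physicalShiftBits | logical

    // Decompose it the same way ParseTS does.
    gotLogical := ts & ((1 << physicalShiftBits) - 1)
    gotPhysical := ts >> physicalShiftBits
    fmt.Println("physical ms:", gotPhysical, "logical:", gotLogical)
}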
40 util/typeutil/comparison.go Normal file
@@ -0,0 +1,40 @@
|
||||
// Copyright 2020 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import "time"
|
||||
|
||||
// MinUint64 returns the min value between two variables whose type are uint64.
|
||||
func MinUint64(a, b uint64) uint64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxUint64 returns the max value between two variables whose type are uint64.
|
||||
func MaxUint64(a, b uint64) uint64 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MinDuration returns the min value between two variables whose type are time.Duration.
|
||||
func MinDuration(a, b time.Duration) time.Duration {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
47 util/typeutil/comparison_test.go Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2017 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/pingcap/check"
|
||||
)
|
||||
|
||||
func TestComparison(t *testing.T) {
|
||||
TestingT(t)
|
||||
}
|
||||
|
||||
var _ = Suite(&testMinMaxSuite{})
|
||||
|
||||
type testMinMaxSuite struct{}
|
||||
|
||||
func (s *testMinMaxSuite) TestMinUint64(c *C) {
|
||||
c.Assert(MinUint64(1, 2), Equals, uint64(1))
|
||||
c.Assert(MinUint64(2, 1), Equals, uint64(1))
|
||||
c.Assert(MinUint64(1, 1), Equals, uint64(1))
|
||||
}
|
||||
|
||||
func (s *testMinMaxSuite) TestMaxUint64(c *C) {
|
||||
c.Assert(MaxUint64(1, 2), Equals, uint64(2))
|
||||
c.Assert(MaxUint64(2, 1), Equals, uint64(2))
|
||||
c.Assert(MaxUint64(1, 1), Equals, uint64(1))
|
||||
}
|
||||
|
||||
func (s *testMinMaxSuite) TestMinDuration(c *C) {
|
||||
c.Assert(MinDuration(time.Minute, time.Second), Equals, time.Second)
|
||||
c.Assert(MinDuration(time.Second, time.Minute), Equals, time.Second)
|
||||
c.Assert(MinDuration(time.Second, time.Second), Equals, time.Second)
|
||||
}
|
36 util/typeutil/convension.go Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
)
|
||||
|
||||
// BytesToUint64 converts a byte slice to uint64.
|
||||
func BytesToUint64(b []byte) (uint64, error) {
|
||||
if len(b) != 8 {
|
||||
return 0, errors.Errorf("invalid data, must 8 bytes, but %d", len(b))
|
||||
}
|
||||
|
||||
return binary.BigEndian.Uint64(b), nil
|
||||
}
|
||||
|
||||
// Uint64ToBytes converts uint64 to a byte slice.
|
||||
func Uint64ToBytes(v uint64) []byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, v)
|
||||
return b
|
||||
}
|
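A quick usage sketch of the two conversion helpers above; the import path is an assumption.

package main

import (
    "fmt"
    "log"

    // Assumed import path for the typeutil package added above.
    "github.com/czs007/suvlim/util/typeutil"
)

func main() {
    // Round-trip a uint64 through the 8-byte big-endian encoding.
    b := typeutil.Uint64ToBytes(1 << 40)
    v, err := typeutil.BytesToUint64(b)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v, len(b)) // 1099511627776 8
}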
63 util/typeutil/duration.go Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
)
|
||||
|
||||
// Duration is a wrapper of time.Duration for TOML and JSON.
|
||||
type Duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
// NewDuration creates a Duration from time.Duration.
|
||||
func NewDuration(duration time.Duration) Duration {
|
||||
return Duration{Duration: duration}
|
||||
}
|
||||
|
||||
// MarshalJSON returns the duration as a JSON string.
|
||||
func (d *Duration) MarshalJSON() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf(`"%s"`, d.String())), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses a JSON string into the duration.
|
||||
func (d *Duration) UnmarshalJSON(text []byte) error {
|
||||
s, err := strconv.Unquote(string(text))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
duration, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
d.Duration = duration
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses a TOML string into the duration.
|
||||
func (d *Duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// MarshalText returns the duration as a JSON string.
|
||||
func (d Duration) MarshalText() ([]byte, error) {
|
||||
return []byte(d.String()), nil
|
||||
}
|
49 util/typeutil/duration_test.go Normal file
@@ -0,0 +1,49 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
. "github.com/pingcap/check"
|
||||
)
|
||||
|
||||
var _ = Suite(&testDurationSuite{})
|
||||
|
||||
type testDurationSuite struct{}
|
||||
|
||||
type example struct {
|
||||
Interval Duration `json:"interval" toml:"interval"`
|
||||
}
|
||||
|
||||
func (s *testDurationSuite) TestJSON(c *C) {
|
||||
example := &example{}
|
||||
|
||||
text := []byte(`{"interval":"1h1m1s"}`)
|
||||
c.Assert(json.Unmarshal(text, example), IsNil)
|
||||
c.Assert(example.Interval.Seconds(), Equals, float64(60*60+60+1))
|
||||
|
||||
b, err := json.Marshal(example)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(b), Equals, string(text))
|
||||
}
|
||||
|
||||
func (s *testDurationSuite) TestTOML(c *C) {
|
||||
example := &example{}
|
||||
|
||||
text := []byte(`interval = "1h1m1s"`)
|
||||
c.Assert(toml.Unmarshal(text, example), IsNil)
|
||||
c.Assert(example.Interval.Seconds(), Equals, float64(60*60+60+1))
|
||||
}
|
53 util/typeutil/size.go Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2017 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// ByteSize is a uint64 redefined for TOML and JSON.
|
||||
type ByteSize uint64
|
||||
|
||||
// MarshalJSON returns the size as a JSON string.
|
||||
func (b ByteSize) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + units.BytesSize(float64(b)) + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses a JSON string into the bytesize.
|
||||
func (b *ByteSize) UnmarshalJSON(text []byte) error {
|
||||
s, err := strconv.Unquote(string(text))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
v, err := units.RAMInBytes(s)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
*b = ByteSize(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalText parses a TOML string into the bytesize.
|
||||
func (b *ByteSize) UnmarshalText(text []byte) error {
|
||||
v, err := units.RAMInBytes(string(text))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
*b = ByteSize(v)
|
||||
return nil
|
||||
}
|
45 util/typeutil/size_test.go Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright 2017 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
. "github.com/pingcap/check"
|
||||
)
|
||||
|
||||
func TestSize(t *testing.T) {
|
||||
TestingT(t)
|
||||
}
|
||||
|
||||
var _ = Suite(&testSizeSuite{})
|
||||
|
||||
type testSizeSuite struct {
|
||||
}
|
||||
|
||||
func (s *testSizeSuite) TestJSON(c *C) {
|
||||
b := ByteSize(265421587)
|
||||
o, err := json.Marshal(b)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
var nb ByteSize
|
||||
err = json.Unmarshal(o, &nb)
|
||||
c.Assert(err, IsNil)
|
||||
|
||||
b = ByteSize(1756821276000)
|
||||
o, err = json.Marshal(b)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(o), Equals, `"1.598TiB"`)
|
||||
}
|
43 util/typeutil/string_slice.go Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright 2017 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/czs007/suvlim/errors"
|
||||
)
|
||||
|
||||
// StringSlice is more friendly to json encode/decode
|
||||
type StringSlice []string
|
||||
|
||||
// MarshalJSON joins the slice with commas and returns it as a JSON string.
|
||||
func (s StringSlice) MarshalJSON() ([]byte, error) {
|
||||
return []byte(strconv.Quote(strings.Join(s, ","))), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses a JSON string into the string slice.
|
||||
func (s *StringSlice) UnmarshalJSON(text []byte) error {
|
||||
data, err := strconv.Unquote(string(text))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
*s = []string{}
|
||||
return nil
|
||||
}
|
||||
*s = strings.Split(data, ",")
|
||||
return nil
|
||||
}
|
48 util/typeutil/string_slice_test.go Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright 2017 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
. "github.com/pingcap/check"
|
||||
)
|
||||
|
||||
var _ = Suite(&testStringSliceSuite{})
|
||||
|
||||
type testStringSliceSuite struct {
|
||||
}
|
||||
|
||||
func (s *testStringSliceSuite) TestJSON(c *C) {
|
||||
b := StringSlice([]string{"zone", "rack"})
|
||||
o, err := json.Marshal(b)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(o), Equals, "\"zone,rack\"")
|
||||
|
||||
var nb StringSlice
|
||||
err = json.Unmarshal(o, &nb)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(nb, DeepEquals, b)
|
||||
}
|
||||
|
||||
func (s *testStringSliceSuite) TestEmpty(c *C) {
|
||||
ss := StringSlice([]string{})
|
||||
b, err := json.Marshal(ss)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(b), Equals, "\"\"")
|
||||
|
||||
var ss2 StringSlice
|
||||
c.Assert(ss2.UnmarshalJSON(b), IsNil)
|
||||
c.Assert(ss2, DeepEquals, ss)
|
||||
}
|
34 util/typeutil/time.go Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package typeutil
|
||||
|
||||
import "time"
|
||||
|
||||
// ZeroTime is a zero time.
|
||||
var ZeroTime = time.Time{}
|
||||
|
||||
// ParseTimestamp returns a timestamp for a given byte slice.
|
||||
func ParseTimestamp(data []byte) (time.Time, error) {
|
||||
nano, err := BytesToUint64(data)
|
||||
if err != nil {
|
||||
return ZeroTime, err
|
||||
}
|
||||
|
||||
return time.Unix(0, int64(nano)), nil
|
||||
}
|
||||
|
||||
// SubTimeByWallClock returns the duration between two different timestamps.
|
||||
func SubTimeByWallClock(after time.Time, before time.Time) time.Duration {
|
||||
return time.Duration(after.UnixNano() - before.UnixNano())
|
||||
}
|
49 util/typeutil/time_test.go Normal file
@@ -0,0 +1,49 @@
// Copyright 2016 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package typeutil

import (
    "math/rand"
    "time"

    . "github.com/pingcap/check"
)

var _ = Suite(&testTimeSuite{})

type testTimeSuite struct{}

func (s *testTimeSuite) TestParseTimestap(c *C) {
    for i := 0; i < 3; i++ {
        t := time.Now().Add(time.Second * time.Duration(rand.Int31n(1000)))
        data := Uint64ToBytes(uint64(t.UnixNano()))
        nt, err := ParseTimestamp(data)
        c.Assert(err, IsNil)
        c.Assert(nt.Equal(t), IsTrue)
    }
    data := []byte("pd")
    nt, err := ParseTimestamp(data)
    c.Assert(err, NotNil)
    c.Assert(nt.Equal(ZeroTime), IsTrue)
}

func (s *testTimeSuite) TestSubTimeByWallClock(c *C) {
    for i := 0; i < 3; i++ {
        r := rand.Int31n(1000)
        t1 := time.Now()
        t2 := t1.Add(time.Second * time.Duration(r))
        duration := SubTimeByWallClock(t2, t1)
        c.Assert(duration, Equals, time.Second*time.Duration(r))
    }
}
@ -2,6 +2,8 @@ package mock

import (
    "context"
    "strconv"
    "strings"
)

type Key = []byte
@ -25,11 +27,12 @@ func NewTikvStore() (*TikvStore, error) {
    }, nil
}

func (s *TikvStore) PutRows(ctx context.Context, keys [][]byte, values [][]byte, segment string, timestamp []Timestamp) error {
func (s *TikvStore) PutRows(ctx context.Context, prefixKeys [][]byte, timeStamp []Timestamp, suffixKeys [][]byte, values [][]byte) error {
    var i int
    for i = 0; i < len(keys); i++ {
        s.kvMap[string(keys[i])] = values[i]
        s.segmentMap[string(keys[i])] = segment
    for i = 0; i < len(prefixKeys); i++ {
        keys := string(prefixKeys[i]) + "_" + string(suffixKeys[i]) + "_" + strconv.FormatUint(timeStamp[i], 10)
        s.kvMap[keys] = values[i]
        s.segmentMap[string(prefixKeys[i])] = string(suffixKeys[i])
    }
    return nil
}
@ -37,19 +40,23 @@ func (s *TikvStore) PutRows(ctx context.Context, keys [][]byte, values [][]byte,

func (s *TikvStore) DeleteRows(ctx context.Context, keys [][]byte, timestamps []Timestamp) error {
    var i int
    for i = 0; i < len(keys); i++ {
        delete(s.kvMap, string(keys[i]))
        for k, _ := range s.kvMap {
            if strings.Index(k, string(keys[i])) != -1 {
                delete(s.kvMap, k)
            }
        }
        delete(s.segmentMap, string(keys[i]))
    }
    return nil
}

func (s *TikvStore) GetSegment(ctx context.Context, keys [][]byte) []string {
func (s *TikvStore) GetSegment(ctx context.Context, keys [][]byte) *[]string {
    var segmentId []string
    var i int
    for i = 0; i < len(keys); i++ {
        segmentId = append(segmentId, s.segmentMap[string(keys[i])])
    }
    return segmentId
    return &segmentId
}

func (s *TikvStore) GetData(ctx context.Context) map[string][]byte {
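The reworked mock now builds each storage key as "<prefixKey>_<suffixKey>_<timestamp>" and deletes by substring match on the prefix, so every timestamped version under a prefix is removed at once. A sketch of that behaviour, not part of the commit; it assumes mock.Timestamp is a uint64 alias and guesses the mock package's import path:

// Hypothetical exercise of the mock store; import path and Timestamp alias are assumptions.
package main

import (
    "context"
    "fmt"

    "github.com/czs007/suvlim/writer/mock" // assumed import path
)

func main() {
    ctx := context.Background()
    store, _ := mock.NewTikvStore()

    // The row lands under the composite key "collection0_1_tag01_0_42".
    prefixKeys := [][]byte{[]byte("collection0_1")}
    suffixKeys := [][]byte{[]byte("tag01_0")}
    values := [][]byte{[]byte("row-bytes")}
    _ = store.PutRows(ctx, prefixKeys, []mock.Timestamp{42}, suffixKeys, values)
    fmt.Println(len(store.GetData(ctx))) // 1

    // DeleteRows removes every stored key that contains the given prefix.
    _ = store.DeleteRows(ctx, [][]byte{[]byte("collection0_1")}, []mock.Timestamp{43})
    fmt.Println(len(store.GetData(ctx))) // 0
}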
@ -1,69 +1,57 @@
package main

import (
    "container/list"
    "fmt"
    "context"
    "github.com/czs007/suvlim/pulsar/schema"
    "github.com/czs007/suvlim/writer"
)

func GetInsertMsg(entityId int64) *schema.InsertMsg {
func GetInsertMsg(collectionName string, partitionTag string, entityId int64) *schema.InsertMsg {
    return &schema.InsertMsg{
        CollectionName: "collection",
        PartitionTag:   "tag01",
        EntityId:       entityId,
        CollectionName: collectionName,
        PartitionTag:   partitionTag,
        SegmentId:      uint64(entityId / 100),
        EntityId:       int64(entityId),
        Timestamp:      uint64(entityId),
        ClientId:       0,
    }
}

func GetDeleteMsg(entityId int64) *schema.DeleteMsg {
func GetDeleteMsg(collectionName string, entityId int64) *schema.DeleteMsg {
    return &schema.DeleteMsg{
        CollectionName: "collection",
        CollectionName: collectionName,
        EntityId:       entityId,
        Timestamp:      uint64(entityId + 100),
    }
}

//type example struct {
//	id int
//}
//
//type data struct {
//	buffer *list.List
//}

//func GetExample(num int) []*example {
//	var examples []*example
//	i := 0
//	for i = 0; i < num; i++ {
//		examples = append(examples, &example{id: i})
//	}
//	return examples
//}
//
//func GetValue(data *list.List, value []int) []int {
//	for e := data.Front(); e != nil; e = e.Next() {
//		value = append(value, e.Value.(*example).id)
//	}
//	return value
//}

func main() {
    //ctx := context.Background()
    deleteBuffer := list.New()
    //insertBuffer := list.New()
    deleteBuffer.PushBack(1)
    deleteBuffer.PushBack(2)
    var data []*list.Element
    for e := deleteBuffer.Front(); e != nil; e = e.Next() {
        if e.Value.(int) == 1 {
            data = append(data, e)
        }
    ctx := context.Background()
    var topics []string
    topics = append(topics, "test")
    topics = append(topics, "test1")
    writerNode, _ := writer.NewWriteNode(ctx, "null", topics, 0)
    var insertMsgs []*schema.InsertMsg
    for i := 0; i < 120; i++ {
        insertMsgs = append(insertMsgs, GetInsertMsg("collection0", "tag01", int64(i)))
    }
    fmt.Println(data[0].Value.(int))
    //writeNode := writer.NewWriteNode(
    //	ctx,
    //	"",
    //	)
    //a := make(map[string]in)
    //var wg sync.WaitGroup
    writerNode.InsertBatchData(ctx, insertMsgs, 100)
    data1 := writerNode.KvStore.GetData(ctx)
    gtInsertBuffer := writerNode.GetInsertBuffer()
    println(len(data1))
    println(gtInsertBuffer.Len())
    var insertMsgs2 []*schema.InsertMsg
    for i := 120; i < 200; i++ {
        insertMsgs2 = append(insertMsgs2, GetInsertMsg("collection0", "tag02", int64(i)))
    }
    writerNode.InsertBatchData(ctx, insertMsgs2, 200)
    data2 := writerNode.KvStore.GetData(ctx)
    println(len(data2))
    var deleteMsgs []*schema.DeleteMsg
    deleteMsgs = append(deleteMsgs, GetDeleteMsg("collection0", 2))
    deleteMsgs = append(deleteMsgs, GetDeleteMsg("collection0", 120))
    writerNode.DeleteBatchData(ctx, deleteMsgs, 200)
    data3 := writerNode.KvStore.GetData(ctx)
    println(len(data3))
}
253 writer/writer.go
@ -11,237 +11,170 @@ import (
    "sync"
)

type CollectionMeta struct {
    collionName string
    openSegmentId string
    segmentCloseTime uint64
    nextSegmentId string
    nextSegmentCloseTime uint64
    deleteTimeSync uint64
    insertTimeSync uint64
}
//type PartitionMeta struct {
//	collectionName string
//	partitionName string
//	openSegmentId string
//	segmentCloseTime uint64
//	nextSegmentId string
//	nextSegmentCloseTime uint64
//}
//
//type CollectionMeta struct {
//	collionName string
//	partitionMetaMap map[string]*PartitionMeta
//	deleteTimeSync uint64
//	insertTimeSync uint64
//}

type WriteNode struct {
    KvStore *mock.TikvStore
    KvStore           *mock.TikvStore
    mc                *pulsar.MessageClient
    collectionMap map[string]*CollectionMeta
    gtInsertMsgBuffer *list.List
    gtDeleteMsgBuffer *list.List
    deleteTimeSync    uint64
    insertTimeSync    uint64
}
func NewWriteNode(ctx context.Context,
    collectionName []string,
    openSegmentId []string,
    closeTime []uint64,
    nextSegmentId []string,
    nextCloseSegmentTime []uint64,
    timeSync []uint64,
    mc *pulsar.MessageClient) (*WriteNode, error) {
    address string,
    topics []string,
    timeSync uint64) (*WriteNode, error) {
    kv, err := mock.NewTikvStore()
    collectionMap := make(map[string]*CollectionMeta)
    for i := 0; i < len(collectionName); i++ {
        collectionMap[collectionName[i]] = &CollectionMeta{
            collionName:          collectionName[i],
            openSegmentId:        openSegmentId[i],
            segmentCloseTime:     closeTime[i],
            nextSegmentId:        nextSegmentId[i],
            nextSegmentCloseTime: nextCloseSegmentTime[i],
            deleteTimeSync:       timeSync[i],
            insertTimeSync:       timeSync[i],
        }
    }
    mc := &pulsar.MessageClient{}
    return &WriteNode{
        KvStore: kv,
        KvStore:           kv,
        mc:                mc,
        collectionMap: collectionMap,
        gtInsertMsgBuffer: list.New(),
        gtDeleteMsgBuffer: list.New(),
        insertTimeSync:    timeSync,
        deleteTimeSync:    timeSync,
    }, err
}
func (wn *WriteNode) InsertBatchData(ctx context.Context, data []*schema.InsertMsg, timeSync map[string]uint64, wg sync.WaitGroup) error {
    var storeKey string
    keyMap := make(map[string][][]byte)
    binaryDataMap := make(map[string][][]byte)
    timeStampMap := make(map[string][]uint64)
func (wn *WriteNode) InsertBatchData(ctx context.Context, data []*schema.InsertMsg, timeSync uint64, wg sync.WaitGroup) error {
    var prefixKey string
    var suffixKey string
    var prefixKeys [][]byte
    var suffixKeys [][]byte
    var binaryData [][]byte
    var timeStamp []uint64

    keyMap, binaryDataMap, timeStampMap = wn.AddInsertMsgBufferData(keyMap, binaryDataMap, timeStampMap, timeSync)
    wn.AddInsertMsgBufferData(&prefixKeys, &suffixKeys, &binaryData, &timeStamp, timeSync)

    for i := 0; i < len(data); i++ {
        if data[i].Timestamp <= timeSync[data[i].CollectionName] {
            CollectionName := data[i].CollectionName
            storeKey = data[i].CollectionName + strconv.FormatInt(data[i].EntityId, 10)
            keyMap[CollectionName] = append(keyMap[CollectionName], []byte(storeKey))
            binaryDataMap[CollectionName] = append(binaryDataMap[CollectionName], data[i].Serialization())
            timeStampMap[CollectionName] = append(timeStampMap[CollectionName], data[i].Timestamp)
        if data[i].Timestamp <= timeSync {
            prefixKey = data[i].CollectionName + "_" + strconv.FormatInt(data[i].EntityId, 10)
            suffixKey = data[i].PartitionTag + "_" + strconv.FormatUint(data[i].SegmentId, 10)
            prefixKeys = append(prefixKeys, []byte(prefixKey))
            suffixKeys = append(suffixKeys, []byte(suffixKey))
            binaryData = append(binaryData, data[i].Serialization())
            timeStamp = append(timeStamp, data[i].Timestamp)
        } else {
            wn.gtInsertMsgBuffer.PushBack(data[i])
        }
    }

    for k, v := range wn.collectionMap {
        if v.segmentCloseTime < timeSync[k] {
            v.openSegmentId = v.nextSegmentId
            v.segmentCloseTime = v.nextSegmentCloseTime
        }
    }

    for k, v := range keyMap {
        err := (*wn.KvStore).PutRows(ctx, v, binaryDataMap[k], wn.collectionMap[k].openSegmentId, timeStampMap[k])
        if err != nil {
            fmt.Println("Can't insert data")
        }
    }
    (*wn.KvStore).PutRows(ctx, prefixKeys, timeStamp, suffixKeys, binaryData)
    wn.UpdateInsertTimeSync(timeSync)
    wg.Done()
    return nil
}
func (wn *WriteNode) DeleteBatchData(ctx context.Context, data []*schema.DeleteMsg, timeSyncMap map[string]uint64, wg sync.WaitGroup) error {
    var storeKey string
    keyMap := make(map[string][][]byte)
    timeStampMap := make(map[string][]uint64)
func (wn *WriteNode) DeleteBatchData(ctx context.Context, data []*schema.DeleteMsg, timeSync uint64, wg sync.WaitGroup) error {
    var prefixKey string
    var prefixKeys [][]byte
    var timeStamps []uint64

    keyMap, timeStampMap = wn.AddDeleteMsgBufferData(keyMap, timeStampMap, timeSyncMap)
    wn.AddDeleteMsgBufferData(&prefixKeys, &timeStamps, timeSync)

    for i := 0; i < len(data); i++ {
        if data[i].Timestamp <= timeSyncMap[data[i].CollectionName] {
            CollectionName := data[i].CollectionName
            storeKey = data[i].CollectionName + strconv.FormatInt(data[i].EntityId, 10)
            keyMap[CollectionName] = append(keyMap[CollectionName], []byte(storeKey))
            timeStampMap[CollectionName] = append(timeStampMap[CollectionName], data[i].Timestamp)
        if data[i].Timestamp <= timeSync {
            prefixKey = data[i].CollectionName + "_" + strconv.FormatInt(data[i].EntityId, 10) + "_"
            prefixKeys = append(prefixKeys, []byte(prefixKey))
            timeStamps = append(timeStamps, data[i].Timestamp)
        } else {
            wn.gtDeleteMsgBuffer.PushBack(data[i])
        }
    }

    for k, v := range wn.collectionMap {
        if v.segmentCloseTime < timeSyncMap[k] {
            v.openSegmentId = v.nextSegmentId
            v.segmentCloseTime = v.nextSegmentCloseTime
        }
    err := (*wn.KvStore).DeleteRows(ctx, prefixKeys, timeStamps)
    if err != nil {
        fmt.Println("Can't insert data")
    }

    for k, v := range keyMap {
        err := (*wn.KvStore).DeleteRows(ctx, v, timeStampMap[k])
        if err != nil {
            fmt.Println("Can't insert data")
        }
    }
    wn.UpdateDeleteTimeSync(timeSyncMap)
    wn.UpdateDeleteTimeSync(timeSync)
    wg.Done()
    return nil
}
func (wn *WriteNode) AddNextSegment(collectionName string, segmentId string, closeSegmentTime uint64) {
    wn.collectionMap[collectionName].nextSegmentId = segmentId
    wn.collectionMap[collectionName].nextSegmentCloseTime = closeSegmentTime
func (wn *WriteNode) UpdateInsertTimeSync(timeSync uint64) {
    wn.insertTimeSync = timeSync
}

func (wn *WriteNode) UpdateInsertTimeSync(timeSyncMap map[string]uint64) {
    for k, v := range wn.collectionMap {
        v.insertTimeSync = timeSyncMap[k]
    }
func (wn *WriteNode) UpdateDeleteTimeSync(timeSync uint64) {
    wn.deleteTimeSync = timeSync
}

func (wn *WriteNode) UpdateDeleteTimeSync(timeSyncMap map[string]uint64) {
    for k, v := range wn.collectionMap {
        v.deleteTimeSync = timeSyncMap[k]
    }
}

func (wn *WriteNode) UpdateCloseTime(collectionName string, closeTime uint64) {
    wn.collectionMap[collectionName].segmentCloseTime = closeTime
}
func (wn *WriteNode) AddInsertMsgBufferData(keyMap map[string][][]byte,
    dataMap map[string][][]byte,
    timeStampMap map[string][]uint64,
    timeSyncMap map[string]uint64) (map[string][][]byte, map[string][][]byte, map[string][]uint64) {
    var storeKey string
func (wn *WriteNode) AddInsertMsgBufferData(
    prefixKeys *[][]byte,
    suffixKeys *[][]byte,
    data *[][]byte,
    timeStamp *[]uint64,
    timeSync uint64) {
    var prefixKey string
    var suffixKey string
    var selectElement []*list.Element
    for e := wn.gtInsertMsgBuffer.Front(); e != nil; e = e.Next() {
        collectionName := e.Value.(*schema.InsertMsg).CollectionName
        if e.Value.(*schema.InsertMsg).Timestamp <= timeSyncMap[collectionName] {
            storeKey = collectionName +
                strconv.FormatInt(e.Value.(*schema.InsertMsg).EntityId, 10)
            keyMap[collectionName] = append(keyMap[collectionName], []byte(storeKey))
            dataMap[collectionName] = append(dataMap[collectionName], e.Value.(*schema.InsertMsg).Serialization())
            timeStampMap[collectionName] = append(timeStampMap[collectionName], e.Value.(*schema.InsertMsg).Timestamp)
        partitionTag := e.Value.(*schema.InsertMsg).PartitionTag
        segmentId := e.Value.(*schema.InsertMsg).SegmentId
        if e.Value.(*schema.InsertMsg).Timestamp <= timeSync {
            prefixKey = collectionName + "_" + strconv.FormatInt(e.Value.(*schema.InsertMsg).EntityId, 10)
            suffixKey = partitionTag + "_" + strconv.FormatUint(segmentId, 10)
            *prefixKeys = append(*prefixKeys, []byte(prefixKey))
            *suffixKeys = append(*suffixKeys, []byte(suffixKey))
            *data = append(*data, e.Value.(*schema.InsertMsg).Serialization())
            *timeStamp = append(*timeStamp, e.Value.(*schema.InsertMsg).Timestamp)
            selectElement = append(selectElement, e)
        }
    }
    for i := 0; i < len(selectElement); i++ {
        wn.gtInsertMsgBuffer.Remove(selectElement[i])
    }
    return keyMap, dataMap, timeStampMap
}
func (wn *WriteNode) AddDeleteMsgBufferData(keyMap map[string][][]byte,
    timeStampMap map[string][]uint64,
    timeSyncMap map[string]uint64) (map[string][][]byte, map[string][]uint64) {
    var storeKey string
func (wn *WriteNode) AddDeleteMsgBufferData(prefixKeys *[][]byte,
    timeStamps *[]uint64,
    timeSync uint64) {
    var prefixKey string
    var selectElement []*list.Element
    for e := wn.gtDeleteMsgBuffer.Front(); e != nil; e = e.Next() {
        collectionName := e.Value.(*schema.InsertMsg).CollectionName
        if e.Value.(*schema.InsertMsg).Timestamp <= timeSyncMap[collectionName] {
            storeKey = collectionName +
                strconv.FormatInt(e.Value.(*schema.InsertMsg).EntityId, 10)
            keyMap[collectionName] = append(keyMap[collectionName], []byte(storeKey))
            timeStampMap[collectionName] = append(timeStampMap[collectionName], e.Value.(*schema.InsertMsg).Timestamp)
        if e.Value.(*schema.DeleteMsg).Timestamp <= timeSync {
            prefixKey = collectionName + "_" + strconv.FormatInt(e.Value.(*schema.InsertMsg).EntityId, 10) + "_"
            *prefixKeys = append(*prefixKeys, []byte(prefixKey))
            *timeStamps = append(*timeStamps, e.Value.(*schema.DeleteMsg).Timestamp)
            selectElement = append(selectElement, e)
        }
    }
    for i := 0; i < len(selectElement); i++ {
        wn.gtDeleteMsgBuffer.Remove(selectElement[i])
    }
    return keyMap, timeStampMap
}
func (wn *WriteNode) AddCollection(collectionName string,
    openSegmentId string,
    closeTime uint64,
    nextSegmentId string,
    nextSegmentCloseTime uint64,
    timeSync uint64) {
    wn.collectionMap[collectionName] = &CollectionMeta{
        collionName: collectionName,
        openSegmentId: openSegmentId,
        segmentCloseTime: closeTime,
        nextSegmentId: nextSegmentId,
        nextSegmentCloseTime: nextSegmentCloseTime,
        deleteTimeSync: timeSync,
        insertTimeSync: timeSync,
    }
func (wn *WriteNode) GetInsertBuffer() *list.List {
    return wn.gtInsertMsgBuffer
}

func (wn *WriteNode) DeleteCollection(collectionName string) {
    delete(wn.collectionMap, collectionName)
    var deleteMsg []*list.Element
    var insertMsg []*list.Element
    for e := wn.gtInsertMsgBuffer.Front(); e != nil; e = e.Next() {
        if e.Value.(*schema.InsertMsg).CollectionName == collectionName {
            insertMsg = append(insertMsg, e)
        }
    }
    for e := wn.gtDeleteMsgBuffer.Front(); e != nil; e = e.Next() {
        if e.Value.(*schema.DeleteMsg).CollectionName == collectionName {
            deleteMsg = append(deleteMsg, e)
        }
    }
    for i := 0; i < len(insertMsg); i++ {
        wn.gtInsertMsgBuffer.Remove(insertMsg[i])
    }
    for i := 0; i < len(deleteMsg); i++ {
        wn.gtDeleteMsgBuffer.Remove(deleteMsg[i])
    }
func (wn *WriteNode) GetDeleteBuffer() *list.List {
    return wn.gtDeleteMsgBuffer
}
func (wn *WriteNode) doWriteNode(ctx context.Context, wg sync.WaitGroup) {
    deleteTimeSync := make(map[string]uint64)
    insertTimeSync := make(map[string]uint64)
    wg.Add(2)
    go wn.InsertBatchData(ctx, wn.mc.InsertMsg, insertTimeSync, wg)
    go wn.DeleteBatchData(ctx, wn.mc.DeleteMsg, deleteTimeSync, wg)
    wg.Wait()
    //deleteTimeSync := make(map[string]uint64)
    //insertTimeSync := make(map[string]uint64)
    //wg.Add(2)
    //go wn.InsertBatchData(ctx, wn.mc.InsertMsg, insertTimeSync, wg)
    //go wn.DeleteBatchData(ctx, wn.mc.DeleteMsg, deleteTimeSync, wg)
    //wg.Wait()
}
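Taken together, the refactor drops the per-collection CollectionMeta map in favor of a single pair of timeSync watermarks plus two global buffers: messages newer than the current watermark are parked in gtInsertMsgBuffer or gtDeleteMsgBuffer and only flushed by a later batch with a higher timeSync. A minimal sketch of that flow, not part of the commit; the import paths are assumptions, and the sync.WaitGroup is only passed to satisfy the signature shown above (the code receives it by value):

// Hypothetical WriteNode usage; import paths are assumed.
package main

import (
    "context"
    "fmt"
    "sync"

    "github.com/czs007/suvlim/pulsar/schema"
    "github.com/czs007/suvlim/writer"
)

func main() {
    ctx := context.Background()
    node, _ := writer.NewWriteNode(ctx, "null", []string{"test"}, 0)

    late := &schema.InsertMsg{
        CollectionName: "collection0",
        PartitionTag:   "tag01",
        SegmentId:      1,
        EntityId:       150,
        Timestamp:      150, // newer than the timeSync passed below
    }

    var wg sync.WaitGroup
    wg.Add(1)
    node.InsertBatchData(ctx, []*schema.InsertMsg{late}, 100, wg)
    fmt.Println(node.GetInsertBuffer().Len()) // 1: parked, not yet written

    wg.Add(1)
    node.InsertBatchData(ctx, nil, 200, wg)
    fmt.Println(node.GetInsertBuffer().Len()) // 0: flushed to the kv store
}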