mirror of https://gitee.com/milvus-io/milvus.git
synced 2024-11-29 18:38:44 +08:00
Add Distributed main entrypoint
Signed-off-by: Xiangyu Wang <xiangyu.wang@zilliz.com>
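A quick way to try the new entrypoint locally (a sketch, not part of the commit: the output binary name is an assumption, this commit's Makefile adds no dedicated target for it, and the build may require the C++ core built first, as the `build-cpp` prerequisites below suggest):

```shell
# Build the combined entrypoint added by this commit (binary name assumed).
$ go build -o bin/milvus-distributed cmd/distributed/main.go
# Run it with only the proxy service role enabled.
$ ./bin/milvus-distributed --proxy-service
```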
This commit is contained in:
parent
73d2b6a101
commit
99cef4b5c8
Makefile (8 lines changed)
@@ -113,12 +113,12 @@ datanode: build-cpp
indexnode: build-cpp
	@echo "Building each component's binary to './bin'"
	@echo "Building distributed indexnode ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/distributed/indexnode $(PWD)/cmd/distributed/indexnode/main.go 1>/dev/null
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/indexnode $(PWD)/cmd/indexnode/main.go 1>/dev/null

indexservice: build-cpp
	@echo "Building each component's binary to './bin'"
	@echo "Building distributed indexservice ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/distributed/indexservice $(PWD)/cmd/distributed/indexservice/main.go 1>/dev/null
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/indexservice $(PWD)/cmd/indexservice/main.go 1>/dev/null

# Builds various components locally.
@@ -145,9 +145,9 @@ build-go: build-cpp
	@echo "Building singlenode ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/singlenode $(PWD)/cmd/singlenode/main.go 1>/dev/null
	@echo "Building distributed indexservice ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/indexservice $(PWD)/cmd/distributed/indexservice/main.go 1>/dev/null
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/indexservice $(PWD)/cmd/indexservice/main.go 1>/dev/null
	@echo "Building distributed indexnode ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/indexnode $(PWD)/cmd/distributed/indexnode/main.go 1>/dev/null
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/indexnode $(PWD)/cmd/indexnode/main.go 1>/dev/null
	@echo "Building data node ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/datanode $(PWD)/cmd/datanode/main.go 1>/dev/null
	@echo "Building dataservice ..."
cmd/distributed/main.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"syscall"

	"github.com/go-kit/kit/log/level"
	"github.com/ilyakaznacheev/cleanenv"
	"github.com/oklog/run"
	"github.com/pkg/errors"
	"github.com/prometheus/common/promlog"
	grpcproxyservice "github.com/zilliztech/milvus-distributed/internal/distributed/proxyservice"
	"gopkg.in/alecthomas/kingpin.v2"
)

type MilvusRoles struct {
	EnableMaster           bool `env:"ENABLE_MASTER"`
	EnableProxyService     bool `env:"ENABLE_PROXY_SERVICE"`
	EnableProxyNode        bool `env:"ENABLE_PROXY_NODE"`
	EnableQueryService     bool `env:"ENABLE_QUERY_SERVICE"`
	EnableQueryNode        bool `env:"ENABLE_QUERY_NODE"`
	EnableDataService      bool `env:"ENABLE_DATA_SERVICE"`
	EnableDataNode         bool `env:"ENABLE_DATA_NODE"`
	EnableIndexService     bool `env:"ENABLE_INDEX_SERVICE"`
	EnableIndexNode        bool `env:"ENABLE_INDEX_NODE"`
	EnableMsgStreamService bool `env:"ENABLE_MSGSTREAM_SERVICE"`
}

func (mr *MilvusRoles) hasAnyRole() bool {
	return mr.EnableMaster || mr.EnableMsgStreamService ||
		mr.EnableProxyService || mr.EnableProxyNode ||
		mr.EnableQueryService || mr.EnableQueryNode ||
		mr.EnableDataService || mr.EnableDataNode ||
		mr.EnableIndexService || mr.EnableIndexNode
}

var roles MilvusRoles

func main() {
	a := kingpin.New(filepath.Base(os.Args[0]), "Milvus")

	a.HelpFlag.Short('h')

	a.Flag("master", "Run master service").Short('m').Default("false").BoolVar(&roles.EnableMaster)
	a.Flag("msgstream-service", "Run msgstream service").Short('M').Default("false").BoolVar(&roles.EnableMsgStreamService)
	a.Flag("proxy-service", "Run proxy service").Short('p').Default("false").BoolVar(&roles.EnableProxyService)
	a.Flag("proxy-node", "Run proxy node").Short('P').Default("false").BoolVar(&roles.EnableProxyNode)
	a.Flag("query-service", "Run query service").Short('q').Default("false").BoolVar(&roles.EnableQueryService)
	a.Flag("query-node", "Run query node").Short('Q').Default("false").BoolVar(&roles.EnableQueryNode)
	a.Flag("data-service", "Run data service").Short('d').Default("false").BoolVar(&roles.EnableDataService)
	a.Flag("data-node", "Run data node").Short('D').Default("false").BoolVar(&roles.EnableDataNode)
	a.Flag("index-service", "Run index service").Short('i').Default("false").BoolVar(&roles.EnableIndexService)
	a.Flag("index-node", "Run index node").Short('I').Default("false").BoolVar(&roles.EnableIndexNode)

	_, err := a.Parse(os.Args[1:])
	if err != nil {
		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "Error parsing commandline arguments"))
		a.Usage(os.Args[1:])
		os.Exit(2)
	}

	if !roles.hasAnyRole() {
		err := cleanenv.ReadEnv(&roles)
		if err != nil {
			fmt.Println(err)
			os.Exit(-1)
		}
	}

	if !roles.hasAnyRole() {
		fmt.Println("Please select at least one service to start")
		os.Exit(-1)
	}

	logger := promlog.New(NewLogConfig())

	var (
		ctxProxyService, cancelProxyService = context.WithCancel(context.Background())
		proxyService                        = NewProxyService(ctxProxyService)
	)

	var g run.Group
	{
		// Termination handler.
		term := make(chan os.Signal, 1)
		signal.Notify(term, os.Interrupt, syscall.SIGTERM)
		cancel := make(chan struct{})
		g.Add(
			func() error {
				select {
				case <-term:
					level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
				case <-cancel:
				}
				return nil
			},
			func(err error) {
				close(cancel)
			},
		)
	}
	if roles.EnableProxyService {
		// ProxyService
		g.Add(
			func() error {
				err := proxyService.Run()
				level.Info(logger).Log("msg", "Proxy service stopped")
				return err
			},
			func(err error) {
				level.Info(logger).Log("msg", "Stopping proxy service...")
				cancelProxyService()
			},
		)
	}
	if roles.EnableProxyNode {
		// ProxyNode
	}

	if err := g.Run(); err != nil {
		level.Error(logger).Log("err", err)
		os.Exit(1)
	}
	level.Info(logger).Log("msg", "See you next time!")
}

func NewLogConfig() *promlog.Config {
	logConfig := promlog.Config{
		Level:  &promlog.AllowedLevel{},
		Format: &promlog.AllowedFormat{},
	}
	err := logConfig.Level.Set("debug")
	if err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}

	err = logConfig.Format.Set("logfmt")
	if err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}

	return &logConfig
}

// Move to proxyservice package
func NewProxyService(ctx context.Context) *ProxyService {
	srv, _ := grpcproxyservice.NewServer(ctx)
	ps := &ProxyService{ctx: ctx, server: srv}
	return ps
}

type ProxyService struct {
	ctx    context.Context
	server *grpcproxyservice.Server
}

func (ps *ProxyService) Run() error {
	return ps.server.Run()
}
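For context on the `oklog/run` pattern the new entrypoint uses: `run.Group` collects pairs of execute/interrupt functions ("actors"); `g.Run()` starts every execute function and, as soon as the first one returns, calls every interrupt function and waits for the rest to finish. A minimal self-contained sketch of the same shutdown wiring (illustration only, not Milvus code):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/oklog/run"
)

func main() {
	var g run.Group

	// Actor 1: block until SIGINT/SIGTERM arrives or the group is interrupted.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)
	cancel := make(chan struct{})
	g.Add(func() error {
		select {
		case sig := <-term:
			fmt.Printf("received %v, exiting gracefully\n", sig)
		case <-cancel:
		}
		return nil
	}, func(error) {
		close(cancel) // unblock the execute function above
	})

	// Actor 2: a long-running service; its interrupt function stops it.
	stop := make(chan struct{})
	g.Add(func() error {
		<-stop // stand-in for a blocking Run() of a real service
		return nil
	}, func(error) {
		close(stop)
	})

	// Run blocks until the first actor returns, then interrupts the rest.
	if err := g.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

This is why each role in `main.go` above is registered as an execute/interrupt pair: a signal, or any one service failing, tears the whole process down cleanly.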
@@ -24,8 +24,7 @@ import (

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	grpcindexnode.Init()
	svr, err := grpcindexnode.NewGrpcServer(ctx)
	svr, err := grpcindexnode.NewServer(ctx)
	if err != nil {
		log.Print("create server failed", zap.Error(err))
	}
@@ -43,7 +42,7 @@ func main() {
		cancel()
	}()

	if err := svr.Start(); err != nil {
	if err := svr.Run(); err != nil {
		log.Fatal("run builder server failed", zap.Error(err))
	}
@@ -24,7 +24,6 @@ import (

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	grpcindexserver.Init()
	svr, err := grpcindexserver.NewServer(ctx)
	if err != nil {
		log.Print("create server failed", zap.Error(err))
@@ -43,14 +42,17 @@ func main() {
		cancel()
	}()

	if err := svr.Start(); err != nil {
	if err := svr.Run(); err != nil {
		log.Fatal("run builder server failed", zap.Error(err))
	}

	<-ctx.Done()
	log.Print("Got signal to exit", zap.String("signal", sig.String()))

	svr.Stop()
	if err := svr.Stop(); err != nil {
		log.Fatal("stop server failed", zap.Error(err))
	}

	switch sig {
	case syscall.SIGTERM:
		exit(0)
@@ -36,6 +36,9 @@ func main() {
	psc.Params.Init()
	log.Printf("proxy service address : %s", psc.Params.ServiceAddress)
	proxyService := psc.NewClient(psc.Params.ServiceAddress)
	if err = proxyService.Init(); err != nil {
		panic(err)
	}

	for cnt = 0; cnt < reTryCnt; cnt++ {
		pxStates, err := proxyService.GetComponentStates()
@@ -14,7 +14,7 @@ import (

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	svr, err := grpcproxynode.NewServer()
	svr, err := grpcproxynode.NewServer(ctx)
	if err != nil {
		log.Print("create server failed", zap.Error(err))
	}
@@ -14,7 +14,7 @@ import (

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	svr, err := grpcproxyservice.NewServer()
	svr, err := grpcproxyservice.NewServer(ctx)
	if err != nil {
		log.Print("create server failed", zap.Error(err))
	}
@@ -140,9 +140,8 @@ func InitQueryNode(wg *sync.WaitGroup) {

func InitIndexBuilder(wg *sync.WaitGroup) {
	defer wg.Done()
	indexnode.Init()
	ctx, cancel := context.WithCancel(context.Background())
	svr, err := indexnode.CreateIndexNode(ctx)
	svr, err := indexnode.NewNodeImpl(ctx)
	if err != nil {
		log.Print("create server failed", zap.Error(err))
	}
@@ -160,6 +159,10 @@ func InitIndexBuilder(wg *sync.WaitGroup) {
		cancel()
	}()

	if err := svr.Init(); err != nil {
		log.Fatal("init builder server failed", zap.Error(err))
	}

	if err := svr.Start(); err != nil {
		log.Fatal("run builder server failed", zap.Error(err))
	}
docs/developer_guides/entrypoint.md (new file, 58 lines)
@@ -0,0 +1,58 @@
## Entrypoint

Usage:

- Command line arguments

- ```shell
  $ ./milvus-distributed --help
  usage: main [<flags>]

  Milvus

  Flags:
        --help               Show context-sensitive help (also try --help-long and --help-man).
    -m, --master             Run master service
    -M, --msgstream-service  Run msgstream service
    -p, --proxy-service      Run proxy service
    -P, --proxy-node         Run proxy node
    -q, --query-service      Run query service
    -Q, --query-node         Run query node
    -d, --data-service       Run data service
    -D, --data-node          Run data node
    -i, --index-service      Run index service
    -I, --index-node         Run index node


  # Start the master and the proxy service in one container
  $ ./milvus-distributed --master --proxy-service
  ```

- Environment variables

- ```shell
  $ export ENABLE_MASTER=1
  $ export ENABLE_PROXY_SERVICE=1
  $ ./milvus-distributed
  ```

- ```shell
  $ ENABLE_MASTER=1 ENABLE_PROXY_SERVICE=1 ./milvus-distributed
  ```

- docker-compose

- ```yaml
  milvus-master:
    image: milvusdb/milvus-distributed:latest
    environment:
      - ENABLE_MASTER=1

  milvus-proxy:
    image: milvusdb/milvus-distributed:latest
    environment:
      - ENABLE_PROXY_SERVICE=1
```
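One behavior worth noting from `cmd/distributed/main.go` above: environment variables are consulted only when no role flag was given on the command line (`cleanenv.ReadEnv` runs only if `hasAnyRole()` is still false after flag parsing), so flags effectively shadow the environment:

```shell
# ENABLE_MASTER is ignored here because a role flag is present;
# only the proxy service starts.
$ ENABLE_MASTER=1 ./milvus-distributed --proxy-service
```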
go.mod (13 lines changed)
|
||||
require (
|
||||
code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48 // indirect
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
|
||||
github.com/akamensky/argparse v1.2.2
|
||||
github.com/apache/pulsar-client-go v0.1.1
|
||||
github.com/apache/thrift v0.13.0
|
||||
github.com/aws/aws-sdk-go v1.30.8 // indirect
|
||||
github.com/coreos/etcd v3.3.25+incompatible // indirect
|
||||
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
|
||||
github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect
|
||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
|
||||
github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
|
||||
github.com/frankban/quicktest v1.10.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/go-basic/ipv4 v1.0.0
|
||||
github.com/go-kit/kit v0.9.0
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
|
||||
github.com/golang/mock v1.3.1
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/google/btree v1.0.0
|
||||
github.com/ilyakaznacheev/cleanenv v1.2.5
|
||||
github.com/klauspost/compress v1.10.11 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.5
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/onsi/ginkgo v1.12.1 // indirect
|
||||
github.com/onsi/gomega v1.10.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
@ -29,11 +36,12 @@ require (
|
||||
github.com/pingcap/errors v0.11.4 // indirect
|
||||
github.com/pingcap/log v0.0.0-20200828042413-fce0951f1463 // indirect
|
||||
github.com/pivotal-golang/bytefmt v0.0.0-20200131002437-cf55d5288a48 // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.5.1 // indirect
|
||||
github.com/prometheus/common v0.10.0 // indirect
|
||||
github.com/prometheus/common v0.10.0
|
||||
github.com/prometheus/procfs v0.1.3 // indirect
|
||||
github.com/quasilyte/go-ruleguard v0.2.1 // indirect
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/sirupsen/logrus v1.6.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0
|
||||
github.com/spf13/cast v1.3.0
|
||||
github.com/spf13/viper v1.7.1
|
||||
@ -54,6 +62,7 @@ require (
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 // indirect
|
||||
google.golang.org/grpc v1.31.0
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
|
||||
gopkg.in/yaml.v2 v2.3.0 // indirect
|
||||
honnef.co/go/tools v0.0.1-2020.1.4 // indirect
|
||||
|
go.sum (15 lines changed)
@@ -18,6 +18,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw=
github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/akamensky/argparse v1.2.2 h1:P17T0ZjlUNJuWTPPJ2A5dM1wxarHgHqfYH+AZTo2xQA=
github.com/akamensky/argparse v1.2.2/go.mod h1:S5kwC7IuDcEr5VeXtGPRVZ5o/FdhcMlQz4IZQuw64xA=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -80,6 +82,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA=
github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -104,10 +107,13 @@ github.com/go-basic/ipv4 v1.0.0 h1:gjyFAa1USC1hhXTkPOwBWDPfMcUaIM+tvo1XzV9EZxs=
github.com/go-basic/ipv4 v1.0.0/go.mod h1:etLBnaxbidQfuqE6wgZQfs38nEWNmzALkxDZe4xY8Dg=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -192,12 +198,16 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ilyakaznacheev/cleanenv v1.2.5 h1:/SlcF9GaIvefWqFJzsccGG/NJdoaAwb7Mm7ImzhO3DM=
github.com/ilyakaznacheev/cleanenv v1.2.5/go.mod h1:/i3yhzwZ3s7hacNERGFwvlhwXMDcaqwIzmayEhbRplk=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLVZ0sMKk=
github.com/jawher/mow.cli v1.1.0/go.mod h1:aNaQlc7ozF3vw6IJ2dHjp2ZFiA4ozMIYY6PyuRJwlUg=
github.com/jawher/mow.cli v1.2.0/go.mod h1:y+pcA3jBAdo/GIZx/0rFjw/K2bVEODP9rfZOfaiq8Ko=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -220,6 +230,7 @@ github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -267,6 +278,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -640,6 +653,8 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
olympos.io/encoding/edn v0.0.0-20200308123125-93e3b8dd0e24 h1:sreVOrDp0/ezb0CHKVek/l7YwpxPJqv+jT3izfSphA4=
olympos.io/encoding/edn v0.0.0-20200308123125-93e3b8dd0e24/go.mod h1:oVgVk4OWVDi43qWBEyGhXgYxt7+ED4iYNpTngSLX2Iw=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
@@ -24,7 +24,7 @@ func newAllocatorImpl(s MasterServiceInterface) *allocatorImpl {

func (alloc *allocatorImpl) allocID() (UniqueID, error) {
	resp, err := alloc.masterService.AllocID(&masterpb.IDRequest{
		Base: &commonpb.MsgBase{
			MsgType:   commonpb.MsgType_kShowCollections,
			MsgType:   commonpb.MsgType_kRequestID,
			MsgID:     1, // GOOSE TODO
			Timestamp: 0, // GOOSE TODO
			SourceID:  Params.NodeID,
@@ -1,20 +1,15 @@
package datanode

import (
	"context"
	"fmt"
	"log"
	"math/rand"
	"os"
	"strconv"
	"testing"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.uber.org/zap"

	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
	"github.com/zilliztech/milvus-distributed/internal/master"
)

func makeNewChannelNames(names []string, suffix string) []string {
@@ -36,52 +31,10 @@ func refreshChannelNames() {
	Params.InsertChannelNames = makeNewChannelNames(Params.InsertChannelNames, suffix)
}

func startMaster(ctx context.Context) {
	master.Init()
	etcdAddr := master.Params.EtcdAddress
	metaRootPath := master.Params.MetaRootPath

	etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
	if err != nil {
		panic(err)
	}
	_, err = etcdCli.Delete(context.TODO(), metaRootPath, clientv3.WithPrefix())
	if err != nil {
		panic(err)
	}

	masterPort := 53101
	master.Params.Port = masterPort
	svr, err := master.CreateServer(ctx)
	if err != nil {
		log.Print("create server failed", zap.Error(err))
	}
	if err := svr.Run(int64(master.Params.Port)); err != nil {
		log.Fatal("run server failed", zap.Error(err))
	}

	fmt.Println("Waiting for server!", svr.IsServing())
	Params.MasterAddress = master.Params.Address + ":" + strconv.Itoa(masterPort)
}

func TestMain(m *testing.M) {
	Params.Init()

	refreshChannelNames()
	const ctxTimeInMillisecond = 2000
	const closeWithDeadline = true
	var ctx context.Context

	if closeWithDeadline {
		var cancel context.CancelFunc
		d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
		ctx, cancel = context.WithDeadline(context.Background(), d)
		defer cancel()
	} else {
		ctx = context.Background()
	}

	startMaster(ctx)
	exitCode := m.Run()
	os.Exit(exitCode)
}
@@ -12,9 +12,9 @@ import (
)

type metaTable struct {
	client          kv.TxnBase //
	client          kv.Base //
	segID2FlushMeta map[UniqueID]*datapb.SegmentFlushMeta
	collID2DdlMeta  map[UniqueID]*datapb.DDLFlushMeta // GOOSE TODO: addDDLFlush and has DDLFlush
	collID2DdlMeta  map[UniqueID]*datapb.DDLFlushMeta

	lock sync.RWMutex
}
@@ -36,24 +36,6 @@ func NewMetaTable(kv kv.TxnBase) (*metaTable, error) {
	return mt, nil
}

func (mt *metaTable) AppendDDLBinlogPaths(collID UniqueID, paths []string) error {
	mt.lock.Lock()
	defer mt.lock.Unlock()

	_, ok := mt.collID2DdlMeta[collID]
	if !ok {
		mt.collID2DdlMeta[collID] = &datapb.DDLFlushMeta{
			CollectionID: collID,
			BinlogPaths:  make([]string, 0),
		}
	}

	meta := mt.collID2DdlMeta[collID]
	meta.BinlogPaths = append(meta.BinlogPaths, paths...)

	return mt.saveDDLFlushMeta(meta)
}

func (mt *metaTable) AppendSegBinlogPaths(segmentID UniqueID, fieldID int64, dataPaths []string) error {
	_, ok := mt.segID2FlushMeta[segmentID]
	if !ok {
@@ -97,44 +79,6 @@ func (mt *metaTable) CompleteFlush(segmentID UniqueID) error {
	return mt.saveSegFlushMeta(meta)
}

// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveDDLFlushMeta(meta *datapb.DDLFlushMeta) error {
	value := proto.MarshalTextString(meta)

	mt.collID2DdlMeta[meta.CollectionID] = meta
	prefix := path.Join(Params.DDLFlushMetaSubPath, strconv.FormatInt(meta.CollectionID, 10))

	return mt.client.Save(prefix, value)
}

func (mt *metaTable) reloadDdlMetaFromKV() error {
	mt.collID2DdlMeta = make(map[UniqueID]*datapb.DDLFlushMeta)
	_, values, err := mt.client.LoadWithPrefix(Params.DDLFlushMetaSubPath)
	if err != nil {
		return err
	}

	for _, value := range values {
		ddlMeta := &datapb.DDLFlushMeta{}
		err = proto.UnmarshalText(value, ddlMeta)
		if err != nil {
			return err
		}
		mt.collID2DdlMeta[ddlMeta.CollectionID] = ddlMeta
	}
	return nil
}

// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveSegFlushMeta(meta *datapb.SegmentFlushMeta) error {
	value := proto.MarshalTextString(meta)

	mt.segID2FlushMeta[meta.SegmentID] = meta
	prefix := path.Join(Params.SegFlushMetaSubPath, strconv.FormatInt(meta.SegmentID, 10))

	return mt.client.Save(prefix, value)
}

func (mt *metaTable) reloadSegMetaFromKV() error {
	mt.segID2FlushMeta = make(map[UniqueID]*datapb.SegmentFlushMeta)

@@ -155,6 +99,16 @@ func (mt *metaTable) reloadSegMetaFromKV() error {
	return nil
}

// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveSegFlushMeta(meta *datapb.SegmentFlushMeta) error {
	value := proto.MarshalTextString(meta)

	mt.segID2FlushMeta[meta.SegmentID] = meta
	prefix := path.Join(Params.SegFlushMetaSubPath, strconv.FormatInt(meta.SegmentID, 10))

	return mt.client.Save(prefix, value)
}

func (mt *metaTable) addSegmentFlush(segmentID UniqueID) error {
	mt.lock.Lock()
	defer mt.lock.Unlock()
@@ -197,6 +151,61 @@ func (mt *metaTable) getSegBinlogPaths(segmentID UniqueID) (map[int64][]string,
	return ret, nil
}

// --- DDL ---
func (mt *metaTable) AppendDDLBinlogPaths(collID UniqueID, paths []string) error {
	mt.lock.Lock()
	defer mt.lock.Unlock()

	_, ok := mt.collID2DdlMeta[collID]
	if !ok {
		mt.collID2DdlMeta[collID] = &datapb.DDLFlushMeta{
			CollectionID: collID,
			BinlogPaths:  make([]string, 0),
		}
	}

	meta := mt.collID2DdlMeta[collID]
	meta.BinlogPaths = append(meta.BinlogPaths, paths...)

	return mt.saveDDLFlushMeta(meta)
}

func (mt *metaTable) hasDDLFlushMeta(collID UniqueID) bool {
	mt.lock.RLock()
	defer mt.lock.RUnlock()

	_, ok := mt.collID2DdlMeta[collID]
	return ok
}

// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveDDLFlushMeta(meta *datapb.DDLFlushMeta) error {
	value := proto.MarshalTextString(meta)

	mt.collID2DdlMeta[meta.CollectionID] = meta
	prefix := path.Join(Params.DDLFlushMetaSubPath, strconv.FormatInt(meta.CollectionID, 10))

	return mt.client.Save(prefix, value)
}

func (mt *metaTable) reloadDdlMetaFromKV() error {
	mt.collID2DdlMeta = make(map[UniqueID]*datapb.DDLFlushMeta)
	_, values, err := mt.client.LoadWithPrefix(Params.DDLFlushMetaSubPath)
	if err != nil {
		return err
	}

	for _, value := range values {
		ddlMeta := &datapb.DDLFlushMeta{}
		err = proto.UnmarshalText(value, ddlMeta)
		if err != nil {
			return err
		}
		mt.collID2DdlMeta[ddlMeta.CollectionID] = ddlMeta
	}
	return nil
}

func (mt *metaTable) getDDLBinlogPaths(collID UniqueID) (map[UniqueID][]string, error) {
	mt.lock.RLock()
	defer mt.lock.RUnlock()
@@ -1,26 +1,16 @@
package datanode

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
	"go.etcd.io/etcd/clientv3"
	memkv "github.com/zilliztech/milvus-distributed/internal/kv/mem"
)

func TestMetaTable_all(t *testing.T) {
func TestMetaTable_SegmentFlush(t *testing.T) {

	etcdAddr := Params.EtcdAddress
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
	require.NoError(t, err)
	etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/meta/root")

	_, err = cli.Delete(context.TODO(), "/etcd/test/meta/root", clientv3.WithPrefix())
	require.NoError(t, err)

	meta, err := NewMetaTable(etcdKV)
	kvMock := memkv.NewMemoryKV()
	meta, err := NewMetaTable(kvMock)
	assert.NoError(t, err)
	defer meta.client.Close()

@@ -65,27 +55,6 @@ func TestMetaTable_all(t *testing.T) {
			ret)
	})

	t.Run("TestMetaTable_AppendDDLBinlogPaths", func(t *testing.T) {

		collID2Paths := map[UniqueID][]string{
			301: {"a", "b", "c"},
			302: {"c", "b", "a"},
		}

		for collID, dataPaths := range collID2Paths {
			for _, dp := range dataPaths {
				err = meta.AppendDDLBinlogPaths(collID, []string{dp})
				assert.Nil(t, err)
			}
		}

		for k, v := range collID2Paths {
			ret, err := meta.getDDLBinlogPaths(k)
			assert.Nil(t, err)
			assert.Equal(t, map[UniqueID][]string{k: v}, ret)
		}
	})

	t.Run("TestMetaTable_CompleteFlush", func(t *testing.T) {

		var segmentID UniqueID = 401
@@ -105,3 +74,37 @@ func TestMetaTable_all(t *testing.T) {
	})

}

func TestMetaTable_DDLFlush(t *testing.T) {
	kvMock := memkv.NewMemoryKV()
	meta, err := NewMetaTable(kvMock)
	assert.NoError(t, err)
	defer meta.client.Close()

	t.Run("TestMetaTable_AppendDDLBinlogPaths", func(t *testing.T) {

		assert.False(t, meta.hasDDLFlushMeta(301))
		assert.False(t, meta.hasDDLFlushMeta(302))

		collID2Paths := map[UniqueID][]string{
			301: {"a", "b", "c"},
			302: {"c", "b", "a"},
		}

		for collID, dataPaths := range collID2Paths {
			for _, dp := range dataPaths {
				err = meta.AppendDDLBinlogPaths(collID, []string{dp})
				assert.Nil(t, err)
			}
		}

		for k, v := range collID2Paths {
			ret, err := meta.getDDLBinlogPaths(k)
			assert.Nil(t, err)
			assert.Equal(t, map[UniqueID][]string{k: v}, ret)
		}

		assert.True(t, meta.hasDDLFlushMeta(301))
		assert.True(t, meta.hasDDLFlushMeta(302))
	})
}
@@ -49,13 +49,14 @@ func (cm *insertChannelManager) AllocChannels(collectionID UniqueID, groupNum in
			group = make([]string, m)
		}
		for k := 0; k < len(group); k++ {
			group = append(group, Params.InsertChannelPrefixName+strconv.Itoa(cm.count))
			group[k] = Params.InsertChannelPrefixName + strconv.Itoa(cm.count)
			cm.count++
		}
		i += int64(len(group))
		j++
		cg = append(cg, group)
	}
	cm.channelGroups[collectionID] = cg
	return cg, nil
}
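The hunk above (and the `GetNodeIDs` and `GetInsertBinlogPaths` hunks further down) fixes the same Go pitfall: `make([]T, n)` already creates n zero-valued elements, so a subsequent `append` grows the slice past n and leaves the zero values at the front. Assigning by index fills the preallocated cells instead. A minimal illustration of the bug and the fix (standalone sketch, not Milvus code):

```go
package main

import "fmt"

func main() {
	// Buggy: make already created 3 zero-valued cells; append adds 3 more.
	bad := make([]string, 3)
	for _, s := range []string{"a", "b", "c"} {
		bad = append(bad, s)
	}
	fmt.Println(bad, len(bad)) // ["" "" "" a b c] 6

	// Fixed: assign by index into the preallocated cells.
	good := make([]string, 3)
	for i, s := range []string{"a", "b", "c"} {
		good[i] = s
	}
	fmt.Println(good, len(good)) // [a b c] 3
}
```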
internal/dataservice/channel_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package dataservice

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestChannelAllocation(t *testing.T) {
	Params.Init()
	Params.InsertChannelNumPerCollection = 4
	manager := newInsertChannelManager()
	cases := []struct {
		collectionID   UniqueID
		groupNum       int
		expectGroupNum int
		success        bool
	}{
		{1, 4, 4, true},
		{1, 4, 4, false},
		{2, 1, 1, true},
		{3, 5, 4, true},
	}
	for _, c := range cases {
		channels, err := manager.AllocChannels(c.collectionID, c.expectGroupNum)
		if !c.success {
			assert.NotNil(t, err)
			continue
		}
		assert.Nil(t, err)
		assert.EqualValues(t, c.expectGroupNum, len(channels))
		total := 0
		for _, channel := range channels {
			total += len(channel)
		}
		assert.EqualValues(t, Params.InsertChannelNumPerCollection, total)
	}
}
@@ -64,8 +64,8 @@ func (c *dataNodeCluster) GetNodeIDs() []int64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	ret := make([]int64, len(c.nodes))
	for _, node := range c.nodes {
		ret = append(ret, node.id)
	for i, node := range c.nodes {
		ret[i] = node.id
	}
	return ret
}
@@ -84,6 +84,8 @@ func newSegmentAllocator(meta *meta, allocator allocator) (*segmentAllocatorImpl
}

func (allocator *segmentAllocatorImpl) OpenSegment(segmentInfo *datapb.SegmentInfo) error {
	allocator.mu.Lock()
	defer allocator.mu.Unlock()
	if _, ok := allocator.segments[segmentInfo.SegmentID]; ok {
		return fmt.Errorf("segment %d already exist", segmentInfo.SegmentID)
	}
@@ -652,9 +652,9 @@ func (s *Server) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*dat
	}
	fields := make([]UniqueID, len(flushMeta.Fields))
	paths := make([]*internalpb2.StringList, len(flushMeta.Fields))
	for _, field := range flushMeta.Fields {
		fields = append(fields, field.FieldID)
		paths = append(paths, &internalpb2.StringList{Values: field.BinlogPaths})
	for i, field := range flushMeta.Fields {
		fields[i] = field.FieldID
		paths[i] = &internalpb2.StringList{Values: field.BinlogPaths}
	}
	resp.FieldIDs = fields
	resp.Paths = paths
@@ -674,7 +674,7 @@ func (s *Server) GetInsertChannels(req *datapb.InsertChannelRequest) ([]string,
		return nil, err
	}

	channels := make([]string, Params.InsertChannelNumPerCollection)
	channels := make([]string, 0)
	for _, group := range channelGroups {
		channels = append(channels, group...)
	}
@@ -69,40 +69,47 @@ func (watcher *dataNodeTimeTickWatcher) StartBackgroundLoop(ctx context.Context)
			log.Println("data node time tick watcher closed")
			return
		case msg := <-watcher.msgQueue:
			segments, err := watcher.allocator.GetSealedSegments()
			if err != nil {
				log.Printf("get sealed segments error %s", err.Error())
			if err := watcher.handleTimeTickMsg(msg); err != nil {
				log.Println(err.Error())
				continue
			}
			for _, id := range segments {
				expired, err := watcher.allocator.IsAllocationsExpired(id, msg.Base.Timestamp)
				if err != nil {
					log.Printf("check allocations expired error %s", err.Error())
					continue
				}
				if expired {
					segmentInfo, err := watcher.meta.GetSegment(id)
					if err != nil {
						log.Println(err.Error())
						continue
					}
					if err = watcher.meta.SetSegmentState(id, datapb.SegmentState_SegmentSealed); err != nil {
						log.Println(err.Error())
						continue
					}
					watcher.cluster.FlushSegment(&datapb.FlushSegRequest{
						Base: &commonpb.MsgBase{
							MsgType:   commonpb.MsgType_kShowCollections,
							MsgID:     -1, // todo add msg id
							Timestamp: 0,  // todo
							SourceID:  -1, // todo
						},
						CollectionID: segmentInfo.CollectionID,
						SegmentIDs:   []int64{segmentInfo.SegmentID},
					})
					watcher.allocator.DropSegment(id)
				}
			}
		}
	}
}

func (watcher *dataNodeTimeTickWatcher) handleTimeTickMsg(msg *msgstream.TimeTickMsg) error {
	segments, err := watcher.allocator.GetSealedSegments()
	if err != nil {
		return err
	}
	for _, id := range segments {
		expired, err := watcher.allocator.IsAllocationsExpired(id, msg.Base.Timestamp)
		if err != nil {
			log.Printf("check allocations expired error %s", err.Error())
			continue
		}
		if expired {
			segmentInfo, err := watcher.meta.GetSegment(id)
			if err != nil {
				log.Println(err.Error())
				continue
			}
			if err = watcher.meta.SetSegmentState(id, datapb.SegmentState_SegmentSealed); err != nil {
				log.Println(err.Error())
				continue
			}
			watcher.cluster.FlushSegment(&datapb.FlushSegRequest{
				Base: &commonpb.MsgBase{
					MsgType:   commonpb.MsgType_kShowCollections,
					MsgID:     -1, // todo add msg id
					Timestamp: 0,  // todo
					SourceID:  Params.NodeID,
				},
				CollectionID: segmentInfo.CollectionID,
				SegmentIDs:   []int64{segmentInfo.SegmentID},
			})
			watcher.allocator.DropSegment(id)
		}
	}
	return nil
}
internal/dataservice/watcher_test.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package dataservice

import (
	"strconv"
	"testing"
	"time"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"

	"github.com/stretchr/testify/assert"
)

func TestDataNodeTTWatcher(t *testing.T) {
	Params.Init()
	c := make(chan struct{})
	cluster := newDataNodeCluster(c)
	defer cluster.ShutDownClients()
	schema := newTestSchema()
	allocator := newMockAllocator()
	meta, err := newMemoryMeta(allocator)
	assert.Nil(t, err)
	segAllocator, err := newSegmentAllocator(meta, allocator)
	assert.Nil(t, err)
	watcher := newDataNodeTimeTickWatcher(meta, segAllocator, cluster)

	id, err := allocator.allocID()
	assert.Nil(t, err)
	err = meta.AddCollection(&collectionInfo{
		Schema: schema,
		ID:     id,
	})
	assert.Nil(t, err)

	cases := []struct {
		sealed     bool
		allocation bool
		expired    bool
		expected   bool
	}{
		{false, false, true, false},
		{false, true, true, false},
		{false, true, false, false},
		{true, false, true, true},
		{true, true, false, false},
		{true, true, true, true},
	}

	segmentIDs := make([]UniqueID, len(cases))
	for i, c := range cases {
		segID, err := allocator.allocID()
		segmentIDs[i] = segID
		assert.Nil(t, err)
		segmentInfo, err := BuildSegment(id, 100, segID, []string{"channel" + strconv.Itoa(i)})
		assert.Nil(t, err)
		err = meta.AddSegment(segmentInfo)
		assert.Nil(t, err)
		err = segAllocator.OpenSegment(segmentInfo)
		assert.Nil(t, err)
		if c.allocation && c.expired {
			_, _, _, err := segAllocator.AllocSegment(id, 100, "channel"+strconv.Itoa(i), 100)
			assert.Nil(t, err)
		}
	}

	time.Sleep(time.Duration(Params.SegIDAssignExpiration) * time.Millisecond)
	for i, c := range cases {
		if c.allocation && !c.expired {
			_, _, _, err := segAllocator.AllocSegment(id, 100, "channel"+strconv.Itoa(i), 100)
			assert.Nil(t, err)
		}
		if c.sealed {
			err := segAllocator.SealSegment(segmentIDs[i])
			assert.Nil(t, err)
		}
	}
	ts, err := allocator.allocTimestamp()
	assert.Nil(t, err)

	err = watcher.handleTimeTickMsg(&msgstream.TimeTickMsg{
		BaseMsg: msgstream.BaseMsg{
			HashValues: []uint32{0},
		},
		TimeTickMsg: internalpb2.TimeTickMsg{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_kTimeTick,
				Timestamp: ts,
			},
		},
	})
	assert.Nil(t, err)
	for i, c := range cases {
		_, ok := segAllocator.segments[segmentIDs[i]]
		assert.EqualValues(t, !c.expected, ok)
	}
}
@@ -3,9 +3,10 @@ package grpcindexnodeclient
import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/zilliztech/milvus-distributed/internal/util/retry"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
@@ -13,15 +14,40 @@ import (
)

type Client struct {
	grpcClient  indexpb.IndexNodeClient
	nodeAddress string
	grpcClient indexpb.IndexNodeClient
	address    string
	ctx        context.Context
}

func (c Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
func (c *Client) Init() error {
	connectGrpcFunc := func() error {
		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
		if err != nil {
			return err
		}
		c.grpcClient = indexpb.NewIndexNodeClient(conn)
		return nil
	}
	err := retry.Retry(10, time.Millisecond*200, connectGrpcFunc)
	if err != nil {
		return err
	}
	return nil
}

func (c *Client) Start() error {
	return nil
}

func (c *Client) Stop() error {
	return nil
}

func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
	return c.grpcClient.GetComponentStates(context.Background(), &commonpb.Empty{})
}

func (c Client) GetTimeTickChannel() (string, error) {
func (c *Client) GetTimeTickChannel() (string, error) {
	resp, err := c.grpcClient.GetTimeTickChannel(context.Background(), &commonpb.Empty{})

	if err != nil {
@@ -33,7 +59,7 @@ func (c Client) GetTimeTickChannel() (string, error) {
	return resp.Value, nil
}

func (c Client) GetStatisticsChannel() (string, error) {
func (c *Client) GetStatisticsChannel() (string, error) {
	resp, err := c.grpcClient.GetStatisticsChannel(context.Background(), &commonpb.Empty{})

	if err != nil {
@@ -45,44 +71,15 @@ func (c Client) GetStatisticsChannel() (string, error) {
	return resp.Value, nil
}

func (c Client) Init() error {
	return nil
}

func (c Client) Start() error {
	return nil
}

func (c Client) Stop() error {
	return nil
}

func (c *Client) tryConnect() error {
	if c.grpcClient != nil {
		return nil
	}
	ctx1, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx1, c.nodeAddress, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Printf("Connect to IndexNode failed, error= %v", err)
		return err
	}
	c.grpcClient = indexpb.NewIndexNodeClient(conn)
	return nil
}

func (c *Client) BuildIndex(req *indexpb.BuildIndexCmd) (*commonpb.Status, error) {

	ctx := context.TODO()
	c.tryConnect()

	return c.grpcClient.BuildIndex(ctx, req)
}

func NewClient(nodeAddress string) (*Client, error) {

	return &Client{
		nodeAddress: nodeAddress,
		address: nodeAddress,
		ctx:     context.Background(),
	}, nil
}
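Both gRPC clients touched by this commit (the indexnode client above and the indexservice client at the end of the diff) now connect through `retry.Retry(10, time.Millisecond*200, connectGrpcFunc)` from `internal/util/retry`, whose implementation is not shown here. A plausible minimal sketch of such a helper, written purely as an assumption about its contract (attempt count, fixed sleep between attempts, last error returned):

```go
package retry

import "time"

// Retry runs fn up to attempts times, sleeping between failures, and
// returns nil on the first success or the last error otherwise.
// This is a sketch; the real internal/util/retry package may differ.
func Retry(attempts int, sleep time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(sleep)
	}
	return err
}
```

Wrapping `grpc.DialContext(..., grpc.WithBlock())` in such a loop turns a single 10-second blocking dial into bounded, repeated short attempts, which is friendlier when services start in arbitrary order.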
internal/distributed/indexnode/paramtable.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package grpcindexnode

import (
	"net"
	"os"
	"strconv"

	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
	"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
)

type ParamTable struct {
	paramtable.BaseTable

	IndexServerAddress string

	IP      string
	Port    int
	Address string
}

var Params ParamTable

func (pt *ParamTable) Init() {
	pt.BaseTable.Init()
	pt.initParams()
}

// todo
func (pt *ParamTable) LoadFromArgs() {

}

//todo
func (pt *ParamTable) LoadFromEnv() {
	indexServiceAddress := os.Getenv("INDEX_SERVICE_ADDRESS")
	if indexServiceAddress != "" {
		pt.IndexServerAddress = indexServiceAddress
	}

	Params.IP = funcutil.GetLocalIP()
	host := os.Getenv("PROXY_NODE_HOST")
	if len(host) > 0 {
		Params.IP = host
	}
}

func (pt *ParamTable) initParams() {
	pt.initIndexServerAddress()
}

// todo remove and use load from env
func (pt *ParamTable) initIndexServerAddress() {
	addr, err := pt.Load("indexServer.address")
	if err != nil {
		panic(err)
	}

	hostName, _ := net.LookupHost(addr)
	if len(hostName) <= 0 {
		if ip := net.ParseIP(addr); ip == nil {
			panic("invalid ip indexServer.address")
		}
	}

	port, err := pt.Load("indexServer.port")
	if err != nil {
		panic(err)
	}
	_, err = strconv.Atoi(port)
	if err != nil {
		panic(err)
	}

	pt.IndexServerAddress = addr + ":" + port
}
@ -7,32 +7,142 @@ import (
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
serviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
|
||||
grpcindexserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
|
||||
"github.com/zilliztech/milvus-distributed/internal/indexnode"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
node typeutil.IndexNodeInterface
|
||||
impl *indexnode.NodeImpl
|
||||
|
||||
grpcServer *grpc.Server
|
||||
serverClient typeutil.IndexServiceInterface
|
||||
loopCtx context.Context
|
||||
loopCancel func()
|
||||
loopWg sync.WaitGroup
|
||||
grpcServer *grpc.Server
|
||||
grpcErrChan chan error
|
||||
|
||||
indexServiceClient *grpcindexserviceclient.Client
|
||||
loopCtx context.Context
|
||||
loopCancel func()
|
||||
loopWg sync.WaitGroup
|
||||
}
|
||||
|
||||
func (s *Server) Run() error {
|
||||
|
||||
if err := s.init(); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.start(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) startGrpcLoop(grpcPort int) {
|
||||
|
||||
defer s.loopWg.Done()
|
||||
|
||||
log.Println("network port: ", grpcPort)
|
||||
lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
|
||||
if err != nil {
|
||||
log.Printf("GrpcServer:failed to listen: %v", err)
|
||||
s.grpcErrChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(s.loopCtx)
|
||||
defer cancel()
|
||||
|
||||
s.grpcServer = grpc.NewServer()
|
||||
indexpb.RegisterIndexNodeServer(s.grpcServer, s)
|
||||
go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
|
||||
if err := s.grpcServer.Serve(lis); err != nil {
|
||||
s.grpcErrChan <- err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (s *Server) init() error {
|
||||
var err error
|
||||
Params.Init()
|
||||
Params.LoadFromEnv()
|
||||
Params.LoadFromArgs()
|
||||
Params.Port = funcutil.GetAvailablePort()
|
||||
Params.Address = Params.IP + ":" + strconv.FormatInt(int64(Params.Port), 10)
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = s.Stop()
|
||||
if err != nil {
|
||||
log.Println("Init failed, and Stop failed")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
s.loopWg.Add(1)
|
||||
go s.startGrpcLoop(Params.Port)
|
||||
// wait for grpc server loop start
|
||||
err = <-s.grpcErrChan
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
indexServiceAddr := Params.IndexServerAddress
|
||||
s.indexServiceClient = grpcindexserviceclient.NewClient(indexServiceAddr)
|
||||
err = s.indexServiceClient.Init()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.impl.SetIndexServiceClient(s.indexServiceClient)
|
||||
|
||||
indexnode.Params.Init()
|
||||
indexnode.Params.Port = Params.Port
|
||||
indexnode.Params.IP = Params.IP
|
||||
indexnode.Params.Address = Params.Address
|
||||
|
||||
s.impl.UpdateStateCode(internalpb2.StateCode_INITIALIZING)
|
||||
|
||||
err = s.impl.Init()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) start() error {
|
||||
err := s.impl.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) Stop() error {
|
||||
s.loopCancel()
|
||||
if s.impl != nil {
|
||||
s.impl.Stop()
|
||||
}
|
||||
if s.grpcServer != nil {
|
||||
s.grpcServer.GracefulStop()
|
||||
}
|
||||
s.loopWg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) BuildIndex(ctx context.Context, req *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
|
||||
return s.impl.BuildIndex(req)
|
||||
}
|
||||
|
||||
func (s *Server) GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
|
||||
-	return s.node.GetComponentStates()
+	return s.impl.GetComponentStates()
 }
 
 func (s *Server) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	ret, err := s.node.GetTimeTickChannel()
+	ret, err := s.impl.GetTimeTickChannel()
 	resp := &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -48,7 +158,7 @@ func (s *Server) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty)
 }
 
 func (s *Server) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
-	ret, err := s.node.GetStatisticsChannel()
+	ret, err := s.impl.GetStatisticsChannel()
 	resp := &milvuspb.StringResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
@@ -63,128 +173,18 @@ func (s *Server) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty)
 	return resp, nil
 }
 
-func (s *Server) registerNode() error {
-	log.Printf("Registering node. IP = %s, Port = %d", indexnode.Params.NodeIP, indexnode.Params.NodePort)
-
-	request := &indexpb.RegisterNodeRequest{
-		Base: nil,
-		Address: &commonpb.Address{
-			Ip:   indexnode.Params.NodeIP,
-			Port: int64(indexnode.Params.NodePort),
-		},
-	}
-	resp, err := s.serverClient.RegisterNode(request)
-	if err != nil {
-		log.Printf("IndexNode connect to IndexService failed, error= %v", err)
-		return err
-	}
-
-	indexnode.Params.NodeID = resp.InitParams.NodeID
-	log.Println("Register indexNode successful with nodeID=", indexnode.Params.NodeID)
-
-	err = indexnode.Params.LoadFromKVPair(resp.InitParams.StartParams)
-	return err
-}
-
-func (s *Server) grpcLoop() {
-	defer s.loopWg.Done()
-
-	lis, err := net.Listen("tcp", ":"+strconv.Itoa(indexnode.Params.NodePort))
-	if err != nil {
-		log.Fatalf("IndexNode grpc server fatal error=%v", err)
-	}
-
-	s.grpcServer = grpc.NewServer()
-	indexpb.RegisterIndexNodeServer(s.grpcServer, s)
-	if err = s.grpcServer.Serve(lis); err != nil {
-		log.Fatalf("IndexNode grpc server fatal error=%v", err)
-	}
-}
-
-func (s *Server) startIndexNode() error {
-	s.loopWg.Add(1)
-	//TODO: How to make sure that grpc server has started successfully
-	go s.grpcLoop()
-
-	log.Println("IndexNode grpc server start successfully")
-
-	err := s.registerNode()
-	if err != nil {
-		return err
-	}
-
-	indexnode.Params.Init()
-	return s.node.Start()
-}
-
-func Init() error {
-	indexnode.Params.Init()
-
-	//Get native ip
-	addresses, err := net.InterfaceAddrs()
-	if err != nil {
-		panic(err)
-	}
-
-	for _, value := range addresses {
-		if ipnet, ok := value.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
-			if ipnet.IP.To4() != nil {
-				indexnode.Params.NodeIP = ipnet.IP.String()
-				break
-			}
-		}
-	}
-
-	//Generate random and available port
-	listener, err := net.Listen("tcp", ":0")
-	if err != nil {
-		return err
-	}
-
-	indexnode.Params.NodePort = listener.Addr().(*net.TCPAddr).Port
-	listener.Close()
-	indexnode.Params.NodeAddress = indexnode.Params.NodeIP + ":" + strconv.FormatInt(int64(indexnode.Params.NodePort), 10)
-	log.Println("IndexNode init successfully, nodeAddress=", indexnode.Params.NodeAddress)
-
-	return nil
-}
-
-func (s *Server) Start() error {
-	return s.startIndexNode()
-}
-
-func (s *Server) Stop() error {
-	s.node.Stop()
-	s.loopCancel()
-	if s.grpcServer != nil {
-		s.grpcServer.GracefulStop()
-	}
-	s.loopWg.Wait()
-
-	return nil
-}
-
-func (s *Server) BuildIndex(ctx context.Context, req *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
-	return s.node.BuildIndex(req)
-}
-
-func NewGrpcServer(ctx context.Context) (*Server, error) {
+func NewServer(ctx context.Context) (*Server, error) {
 	ctx1, cancel := context.WithCancel(ctx)
 	indexServiceClient := serviceclient.NewClient(indexnode.Params.ServiceAddress)
 
-	node, err := indexnode.CreateIndexNode(ctx1)
+	node, err := indexnode.NewNodeImpl(ctx1)
 	if err != nil {
 		defer cancel()
 		return nil, err
 	}
 
 	node.SetServiceClient(indexServiceClient)
 
 	return &Server{
-		loopCtx:      ctx1,
-		loopCancel:   cancel,
-		node:         node,
-		serverClient: indexServiceClient,
+		loopCtx:     ctx1,
+		loopCancel:  cancel,
+		impl:        node,
+		grpcErrChan: make(chan error),
 	}, nil
 }
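
The hunks above reduce the IndexNode gRPC server to a thin adapter: each rpc method keeps the (ctx, request) signature that the generated service interface requires and forwards to an impl method that has neither, with errors carried inside the response message. A self-contained toy version of that shape, using stand-in types instead of the real milvuspb/commonpb messages (every name here is illustrative, not part of the commit):

package main

import (
	"context"
	"fmt"
)

// The impl side exposes plain methods with no context parameter.
type nodeImpl struct{}

func (n *nodeImpl) GetTimeTickChannel() (string, error) { return "time-tick-ch", nil }

// stringResponse stands in for milvuspb.StringResponse: errors are
// reported inside the message, and the grpc-level error stays nil.
type stringResponse struct {
	Value  string
	Reason string
}

type server struct{ impl *nodeImpl }

func (s *server) GetTimeTickChannel(ctx context.Context) (*stringResponse, error) {
	value, err := s.impl.GetTimeTickChannel()
	resp := &stringResponse{Value: value}
	if err != nil {
		resp.Reason = err.Error()
	}
	return resp, nil
}

func main() {
	s := &server{impl: &nodeImpl{}}
	resp, _ := s.GetTimeTickChannel(context.Background())
	fmt.Println(resp.Value)
}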
@@ -5,11 +5,13 @@ import (
 	"log"
 	"time"
 
-	"google.golang.org/grpc"
-
+	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
+	"google.golang.org/grpc"
 )
 
 type UniqueID = typeutil.UniqueID
@@ -17,101 +19,74 @@ type UniqueID = typeutil.UniqueID
 type Client struct {
 	grpcClient indexpb.IndexServiceClient
 	address    string
+	ctx        context.Context
 }
 
-func (g *Client) Init() error {
-	return nil
-}
-
-func (g *Client) Start() error {
-	return nil
-}
-
-func (g *Client) Stop() error {
-	return nil
-}
-
-func (g *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
-	return nil, nil
-}
-
-func (g *Client) GetTimeTickChannel() (string, error) {
-	return "", nil
-}
-
-func (g *Client) GetStatisticsChannel() (string, error) {
-	return "", nil
-}
-
-func (g *Client) tryConnect() error {
-	if g.grpcClient != nil {
-		return nil
-	}
-	ctx1, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	defer cancel()
-
-	conn, err := grpc.DialContext(ctx1, g.address, grpc.WithInsecure(), grpc.WithBlock())
-	if err != nil {
-		log.Printf("Connect to IndexService failed, error= %v", err)
-		return err
-	}
-	g.grpcClient = indexpb.NewIndexServiceClient(conn)
-	return nil
-}
-
-func (g *Client) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
-	err := g.tryConnect()
-	if err != nil {
-		return nil, err
-	}
-
-	ctx := context.TODO()
-	return g.grpcClient.RegisterNode(ctx, req)
-}
-
-func (g *Client) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
-	err := g.tryConnect()
-	if err != nil {
-		return nil, err
-	}
-
-	ctx := context.TODO()
-	return g.grpcClient.BuildIndex(ctx, req)
-}
-
-func (g *Client) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
-	err := g.tryConnect()
-	if err != nil {
-		return nil, err
-	}
-
-	ctx := context.TODO()
-	return g.grpcClient.GetIndexStates(ctx, req)
-}
-
-func (g *Client) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
-	err := g.tryConnect()
-	if err != nil {
-		return nil, err
-	}
-
-	ctx := context.TODO()
-	return g.grpcClient.GetIndexFilePaths(ctx, req)
-}
-
-func (g *Client) NotifyBuildIndex(nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
-	err := g.tryConnect()
-	if err != nil {
-		return nil, err
-	}
-
-	ctx := context.TODO()
-	return g.grpcClient.NotifyBuildIndex(ctx, nty)
-}
+func (c *Client) Init() error {
+	connectGrpcFunc := func() error {
+		conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
+		if err != nil {
+			return err
+		}
+		c.grpcClient = indexpb.NewIndexServiceClient(conn)
+		return nil
+	}
+	err := retry.Retry(10, time.Millisecond*200, connectGrpcFunc)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *Client) Start() error {
+	return nil
+}
+
+func (c *Client) Stop() error {
+	return nil
+}
+
+func (c *Client) GetComponentStates() (*internalpb2.ComponentStates, error) {
+	ctx := context.TODO()
+	return c.grpcClient.GetComponentStates(ctx, &commonpb.Empty{})
+}
+
+func (c *Client) GetTimeTickChannel() (string, error) {
+	return "", nil
+}
+
+func (c *Client) GetStatisticsChannel() (string, error) {
+	return "", nil
+}
+
+func (c *Client) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
+	ctx := context.TODO()
+	return c.grpcClient.RegisterNode(ctx, req)
+}
+
+func (c *Client) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
+	ctx := context.TODO()
+	return c.grpcClient.BuildIndex(ctx, req)
+}
+
+func (c *Client) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
+	ctx := context.TODO()
+	return c.grpcClient.GetIndexStates(ctx, req)
+}
+
+func (c *Client) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
+	ctx := context.TODO()
+	return c.grpcClient.GetIndexFilePaths(ctx, req)
+}
+
+func (c *Client) NotifyBuildIndex(nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
+	ctx := context.TODO()
+	return c.grpcClient.NotifyBuildIndex(ctx, nty)
+}
 
 func NewClient(address string) *Client {
-	log.Println("new indexservice, address = ", address)
+	log.Println("new index service, address = ", address)
 	return &Client{
 		address: address,
+		ctx:     context.Background(),
 	}
 }
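
The rewritten Client.Init replaces the old lazy tryConnect with an eager dial that is retried on a short interval. A minimal sketch of the same pattern in isolation, assuming retry.Retry(attempts int, interval time.Duration, fn func() error) keeps calling fn until it returns nil or the attempts run out (the signature is inferred from how it is called in this diff; the address is hypothetical):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	"github.com/zilliztech/milvus-distributed/internal/util/retry"
)

func dialWithRetry(ctx context.Context, address string) (*grpc.ClientConn, error) {
	var conn *grpc.ClientConn
	connectFn := func() error {
		var err error
		// WithBlock makes the dial synchronous, so every attempt either
		// connects or returns an error that triggers the next retry.
		conn, err = grpc.DialContext(ctx, address, grpc.WithInsecure(), grpc.WithBlock())
		return err
	}
	if err := retry.Retry(10, time.Millisecond*200, connectFn); err != nil {
		return nil, err
	}
	return conn, nil
}

func main() {
	conn, err := dialWithRetry(context.Background(), "localhost:21118")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected")
}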
55
internal/distributed/indexservice/paramtable.go
Normal file
@@ -0,0 +1,55 @@
+package grpcindexservice
+
+import (
+	"net"
+	"strconv"
+
+	"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
+)
+
+type ParamTable struct {
+	paramtable.BaseTable
+
+	ServiceAddress string
+	ServicePort    int
+}
+
+var Params ParamTable
+
+func (pt *ParamTable) Init() {
+	pt.BaseTable.Init()
+	pt.initParams()
+}
+
+func (pt *ParamTable) initParams() {
+	pt.initServicePort()
+	pt.initServiceAddress()
+}
+
+func (pt *ParamTable) initServicePort() {
+	pt.ServicePort = pt.ParseInt("indexServer.port")
+}
+
+func (pt *ParamTable) initServiceAddress() {
+	addr, err := pt.Load("indexServer.address")
+	if err != nil {
+		panic(err)
+	}
+
+	hostName, _ := net.LookupHost(addr)
+	if len(hostName) <= 0 {
+		if ip := net.ParseIP(addr); ip == nil {
+			panic("invalid ip indexServer.address")
+		}
+	}
+
+	port, err := pt.Load("indexServer.port")
+	if err != nil {
+		panic(err)
+	}
+	_, err = strconv.Atoi(port)
+	if err != nil {
+		panic(err)
+	}
+	pt.ServiceAddress = addr + ":" + port
+}
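
The new ParamTable above resolves the index service endpoint from the indexServer.address and indexServer.port config keys. A minimal usage sketch, assuming the import path matches the file's location and the standard Milvus YAML config is reachable:

package main

import (
	"log"

	grpcindexservice "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice"
)

func main() {
	// Init loads the base config, then derives
	// ServiceAddress = indexServer.address + ":" + indexServer.port.
	grpcindexservice.Params.Init()
	log.Println("index service endpoint:", grpcindexservice.Params.ServiceAddress)
}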
@@ -9,6 +9,7 @@ import (
 
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
+	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
 
 	"github.com/zilliztech/milvus-distributed/internal/indexservice"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
@@ -21,17 +22,120 @@ type UniqueID = typeutil.UniqueID
 type Timestamp = typeutil.Timestamp
 
 type Server struct {
-	server typeutil.IndexServiceInterface
+	impl *indexservice.ServiceImpl
 
-	grpcServer *grpc.Server
+	grpcServer  *grpc.Server
+	grpcErrChan chan error
 
 	loopCtx    context.Context
 	loopCancel func()
 	loopWg     sync.WaitGroup
 }
 
+func (s *Server) Run() error {
+
+	if err := s.init(); err != nil {
+		return err
+	}
+
+	if err := s.start(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *Server) init() error {
+	Params.Init()
+	indexservice.Params.Init()
+
+	s.loopWg.Add(1)
+	go s.startGrpcLoop(Params.ServicePort)
+	// wait for grpc impl loop start
+	if err := <-s.grpcErrChan; err != nil {
+		return err
+	}
+	s.impl.UpdateStateCode(internalpb2.StateCode_INITIALIZING)
+
+	if err := s.impl.Init(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *Server) start() error {
+	if err := s.impl.Start(); err != nil {
+		return err
+	}
+	log.Println("indexServer started")
+	return nil
+}
+
+func (s *Server) Stop() error {
+	if s.impl != nil {
+		s.impl.Stop()
+	}
+
+	s.loopCancel()
+	if s.grpcServer != nil {
+		s.grpcServer.GracefulStop()
+	}
+
+	s.loopWg.Wait()
+	return nil
+}
+
+func (s *Server) RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
+	return s.impl.RegisterNode(req)
+}
+
+func (s *Server) BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
+	return s.impl.BuildIndex(req)
+}
+
+func (s *Server) GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
+	return s.impl.GetIndexStates(req)
+}
+
+func (s *Server) GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
+	return s.impl.GetIndexFilePaths(req)
+}
+
+func (s *Server) NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
+	return s.impl.NotifyBuildIndex(nty)
+}
+
+func (s *Server) startGrpcLoop(grpcPort int) {
+	defer s.loopWg.Done()
+
+	log.Println("network port: ", grpcPort)
+	lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
+	if err != nil {
+		log.Printf("GrpcServer:failed to listen: %v", err)
+		s.grpcErrChan <- err
+		return
+	}
+
+	ctx, cancel := context.WithCancel(s.loopCtx)
+	defer cancel()
+
+	s.grpcServer = grpc.NewServer()
+	indexpb.RegisterIndexServiceServer(s.grpcServer, s)
+
+	go funcutil.CheckGrpcReady(ctx, s.grpcErrChan)
+	if err := s.grpcServer.Serve(lis); err != nil {
+		s.grpcErrChan <- err
+	}
+}
+
 func (s *Server) GetComponentStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
-	return s.server.GetComponentStates()
+	return s.impl.GetComponentStates()
 }
 
 func (s *Server) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
@@ -40,7 +144,7 @@ func (s *Server) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty)
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
 		},
 	}
-	channel, err := s.server.GetTimeTickChannel()
+	channel, err := s.impl.GetTimeTickChannel()
 	if err != nil {
 		resp.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
 		resp.Status.Reason = err.Error()
@@ -57,7 +161,7 @@ func (s *Server) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty)
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
 		},
 	}
-	channel, err := s.server.GetStatisticsChannel()
+	channel, err := s.impl.GetStatisticsChannel()
 	if err != nil {
 		resp.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
 		resp.Status.Reason = err.Error()
@@ -67,88 +171,19 @@ func (s *Server) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty)
 	return resp, nil
 }
 
-func Init() error {
-	indexservice.Params.Init()
-	return nil
-}
-
-func (s *Server) Start() error {
-	return s.startIndexServer()
-}
-
-func (s *Server) Stop() error {
-	s.server.Stop()
-	s.loopCancel()
-	if s.grpcServer != nil {
-		s.grpcServer.GracefulStop()
-	}
-
-	s.loopWg.Wait()
-	return nil
-}
-
-func (s *Server) RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
-	return s.server.RegisterNode(req)
-}
-
-func (s *Server) BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
-	return s.server.BuildIndex(req)
-}
-
-func (s *Server) GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
-	return s.server.GetIndexStates(req)
-}
-
-func (s *Server) GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
-	return s.server.GetIndexFilePaths(req)
-}
-
-func (s *Server) NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
-	return s.server.NotifyBuildIndex(nty)
-}
-
-func (s *Server) grpcLoop() {
-	defer s.loopWg.Done()
-
-	lis, err := net.Listen("tcp", ":"+strconv.Itoa(indexservice.Params.Port))
-	if err != nil {
-		log.Fatalf("IndexServer grpc server fatal error=%v", err)
-	}
-
-	s.grpcServer = grpc.NewServer()
-	indexpb.RegisterIndexServiceServer(s.grpcServer, s)
-
-	if err = s.grpcServer.Serve(lis); err != nil {
-		log.Fatalf("IndexServer grpc server fatal error=%v", err)
-	}
-}
-
-func (s *Server) startIndexServer() error {
-	s.loopWg.Add(1)
-	go s.grpcLoop()
-	log.Println("IndexServer grpc server start successfully")
-
-	return s.server.Start()
-}
-
 func NewServer(ctx context.Context) (*Server, error) {
 	ctx1, cancel := context.WithCancel(ctx)
-	serverImp, err := indexservice.CreateIndexService(ctx)
+	serverImp, err := indexservice.NewServiceImpl(ctx)
 	if err != nil {
 		defer cancel()
 		return nil, err
 	}
 	s := &Server{
-		loopCtx:    ctx1,
-		loopCancel: cancel,
-
-		server: serverImp,
+		loopCtx:     ctx1,
+		loopCancel:  cancel,
+		impl:        serverImp,
+		grpcErrChan: make(chan error),
 	}
 
 	return s, nil
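
init above launches the gRPC loop in a goroutine and then blocks on grpcErrChan, so the caller only proceeds once the server is either serving or has failed to bind. A stripped-down sketch of that handshake; checkReady below is a stand-in for funcutil.CheckGrpcReady, not the real implementation:

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

// checkReady stands in for funcutil.CheckGrpcReady: if Serve has not
// reported an error within a grace period, signal success.
func checkReady(ctx context.Context, errChan chan error) {
	select {
	case <-time.After(100 * time.Millisecond):
		errChan <- nil
	case <-ctx.Done():
	}
}

func main() {
	errChan := make(chan error)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		lis, err := net.Listen("tcp", ":0") // any free port
		if err != nil {
			errChan <- err
			return
		}
		server := grpc.NewServer()
		go checkReady(ctx, errChan)
		if err := server.Serve(lis); err != nil {
			errChan <- err
		}
	}()

	// Block until the listener is up or startup failed.
	if err := <-errChan; err != nil {
		log.Fatal(err)
	}
	log.Println("grpc server is up")
}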
@@ -47,10 +47,10 @@ type Server struct {
 	indexServiceClient *grpcindexserviceclient.Client
 }
 
-func NewServer() (*Server, error) {
+func NewServer(ctx context.Context) (*Server, error) {
 
 	server := &Server{
-		ctx:         context.Background(),
+		ctx:         ctx,
 		grpcErrChan: make(chan error),
 	}
 
@@ -123,7 +123,7 @@ func (s *Server) init() error {
 	}()
 
 	s.wg.Add(1)
-	s.startGrpcLoop(Params.Port)
+	go s.startGrpcLoop(Params.Port)
 	// wait for grpc server loop start
 	err = <-s.grpcErrChan
 	if err != nil {
@@ -27,10 +27,10 @@ type Server struct {
 	impl *proxyservice.ServiceImpl
 }
 
-func NewServer() (*Server, error) {
+func NewServer(ctx context.Context) (*Server, error) {
 
 	server := &Server{
-		ctx:         context.Background(),
+		ctx:         ctx,
 		grpcErrChan: make(chan error),
 	}
 
@@ -59,7 +59,7 @@ func (s *Server) init() error {
 	proxyservice.Params.Init()
 
 	s.wg.Add(1)
-	s.startGrpcLoop(Params.ServicePort)
+	go s.startGrpcLoop(Params.ServicePort)
 	// wait for grpc server loop start
 	if err := <-s.grpcErrChan; err != nil {
 		return err
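
Both hunks above apply the same two fixes: the caller's context is threaded into NewServer instead of a fresh context.Background(), and startGrpcLoop is now launched with go. The second fix matters because startGrpcLoop blocks in Serve; called synchronously, init would never reach the receive on grpcErrChan and the unbuffered send inside the loop could never complete. A toy demonstration of that handshake, under the stated assumption that the loop signals exactly once at startup:

package main

import (
	"fmt"
)

func startLoop(errChan chan error) {
	errChan <- nil // signal "started" (unbuffered: needs a receiver)
	select {}      // stands in for grpcServer.Serve, which blocks forever
}

func main() {
	errChan := make(chan error)

	// Without `go`, startLoop would block main at the unbuffered send
	// (no receiver yet) and the receive below would never run.
	go startLoop(errChan)

	if err := <-errChan; err != nil {
		panic(err)
	}
	fmt.Println("server loop started")
}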
@@ -3,7 +3,6 @@ package indexnode
 import (
 	"context"
 	"log"
-	"math/rand"
 	"time"
 
 	"github.com/zilliztech/milvus-distributed/internal/errors"
@@ -12,6 +11,7 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
 )
@@ -23,8 +23,8 @@ const (
 type UniqueID = typeutil.UniqueID
 type Timestamp = typeutil.Timestamp
 
-type IndexNode struct {
-	state internalpb2.StateCode
+type NodeImpl struct {
+	stateCode internalpb2.StateCode
 
 	loopCtx    context.Context
 	loopCancel func()
@@ -40,17 +40,50 @@ type IndexNode struct {
 	closeCallbacks []func()
 }
 
-func Init() {
-	rand.Seed(time.Now().UnixNano())
-	Params.Init()
-}
-
-func CreateIndexNode(ctx context.Context) (*IndexNode, error) {
+func NewNodeImpl(ctx context.Context) (*NodeImpl, error) {
 	ctx1, cancel := context.WithCancel(ctx)
-	b := &IndexNode{
+	b := &NodeImpl{
 		loopCtx:    ctx1,
 		loopCancel: cancel,
 	}
+	var err error
+	b.sched, err = NewTaskScheduler(b.loopCtx, b.kv)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func (i *NodeImpl) Init() error {
+	err := funcutil.WaitForComponentReady(i.serviceClient, "IndexService", 10, time.Second)
+	if err != nil {
+		return err
+	}
+	request := &indexpb.RegisterNodeRequest{
+		Base: nil,
+		Address: &commonpb.Address{
+			Ip:   Params.IP,
+			Port: int64(Params.Port),
+		},
+	}
+
+	resp, err2 := i.serviceClient.RegisterNode(request)
+	if err2 != nil {
+		log.Printf("IndexNode connect to IndexService failed, error= %v", err2)
+		return err2
+	}
+
+	if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
+		return errors.New(resp.Status.Reason)
+	}
+
+	err = Params.LoadConfigFromInitParams(resp.InitParams)
+	if err != nil {
+		return err
+	}
+
 	connectMinIOFn := func() error {
 		option := &miniokv.Option{
@@ -62,73 +95,55 @@ func CreateIndexNode(ctx context.Context) (*IndexNode, error) {
 			CreateBucket: true,
 		}
-		var err error
-		b.kv, err = miniokv.NewMinIOKV(b.loopCtx, option)
+		i.kv, err = miniokv.NewMinIOKV(i.loopCtx, option)
 		if err != nil {
 			return err
 		}
 		return nil
 	}
 
-	err := retry.Retry(10, time.Millisecond*200, connectMinIOFn)
+	err = retry.Retry(10, time.Millisecond*200, connectMinIOFn)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	b.sched, err = NewTaskScheduler(b.loopCtx, b.kv)
-	if err != nil {
-		return nil, err
-	}
+	i.UpdateStateCode(internalpb2.StateCode_HEALTHY)
 
-	return b, nil
-}
-
-// AddStartCallback adds a callback in the startServer phase.
-func (i *IndexNode) AddStartCallback(callbacks ...func()) {
-	i.startCallbacks = append(i.startCallbacks, callbacks...)
-}
-
-// AddCloseCallback adds a callback in the Close phase.
-func (i *IndexNode) AddCloseCallback(callbacks ...func()) {
-	i.closeCallbacks = append(i.closeCallbacks, callbacks...)
-}
-
-func (i *IndexNode) Init() error {
 	return nil
 }
 
-func (i *IndexNode) Start() error {
+func (i *NodeImpl) Start() error {
 	i.sched.Start()
 
 	// Start callbacks
 	for _, cb := range i.startCallbacks {
 		cb()
 	}
 	return nil
 }
 
-func (i *IndexNode) stopIndexNodeLoop() {
-	i.loopCancel()
-
-	i.sched.Close()
-}
-
-// Close closes the server.
-func (i *IndexNode) Stop() error {
-	i.stopIndexNodeLoop()
+func (i *NodeImpl) Stop() error {
+	i.loopCancel()
+	if i.sched != nil {
+		i.sched.Close()
+	}
 	for _, cb := range i.closeCallbacks {
 		cb()
 	}
-	log.Print("IndexNode closed.")
+	log.Print("NodeImpl closed.")
 	return nil
 }
 
-func (i *IndexNode) SetServiceClient(serviceClient typeutil.IndexServiceInterface) {
+func (i *NodeImpl) UpdateStateCode(code internalpb2.StateCode) {
+	i.stateCode = code
+}
+
+func (i *NodeImpl) SetIndexServiceClient(serviceClient typeutil.IndexServiceInterface) {
 	i.serviceClient = serviceClient
 }
 
-func (i *IndexNode) BuildIndex(request *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
+func (i *NodeImpl) BuildIndex(request *indexpb.BuildIndexCmd) (*commonpb.Status, error) {
 	t := newIndexBuildTask()
 	t.cmd = request
 	t.kv = i.kv
@@ -166,12 +181,22 @@ func (i *NodeImpl) BuildIndex(request *indexpb.BuildIndexCmd) (*commonpb.Status
 	return ret, nil
 }
 
+// AddStartCallback adds a callback in the startServer phase.
+func (i *NodeImpl) AddStartCallback(callbacks ...func()) {
+	i.startCallbacks = append(i.startCallbacks, callbacks...)
+}
+
+// AddCloseCallback adds a callback in the Close phase.
+func (i *NodeImpl) AddCloseCallback(callbacks ...func()) {
+	i.closeCallbacks = append(i.closeCallbacks, callbacks...)
+}
+
-func (i *IndexNode) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (i *NodeImpl) GetComponentStates() (*internalpb2.ComponentStates, error) {
 
 	stateInfo := &internalpb2.ComponentInfo{
 		NodeID:    Params.NodeID,
-		Role:      "IndexNode",
-		StateCode: i.state,
+		Role:      "NodeImpl",
+		StateCode: i.stateCode,
 	}
 
 	ret := &internalpb2.ComponentStates{
@@ -184,10 +209,10 @@ func (i *NodeImpl) GetComponentStates() (*internalpb2.ComponentStates, error) {
 	return ret, nil
 }
 
-func (i *IndexNode) GetTimeTickChannel() (string, error) {
+func (i *NodeImpl) GetTimeTickChannel() (string, error) {
 	return "", nil
 }
 
-func (i *IndexNode) GetStatisticsChannel() (string, error) {
+func (i *NodeImpl) GetStatisticsChannel() (string, error) {
 	return "", nil
 }
@@ -16,7 +16,7 @@ import (
 var ctx context.Context
 var cancel func()
 
-var buildClient *IndexNode
+var buildClient *NodeImpl
 
 var masterPort = 53101
 var masterServer *master.Master
@@ -62,7 +62,7 @@ func startMaster(ctx context.Context) {
 
 func startBuilder(ctx context.Context) {
 	var err error
-	buildClient, err = CreateIndexNode(ctx)
+	buildClient, err = NewNodeImpl(ctx)
 	if err != nil {
 		log.Print("create builder failed", zap.Error(err))
 	}
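
The hunks above split what CreateIndexNode used to do in one constructor into an explicit lifecycle: construct, inject the service client, Init (register with the IndexService, pull config from its InitParams, connect MinIO, go HEALTHY), then Start the scheduler. A sketch of the intended call order from a driver's point of view; the import paths and exact wiring are inferred from this diff, and an IndexService is assumed to be reachable at the configured address:

package main

import (
	"context"
	"log"

	grpcindexserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
	"github.com/zilliztech/milvus-distributed/internal/indexnode"
)

func main() {
	ctx := context.Background()
	indexnode.Params.Init()

	node, err := indexnode.NewNodeImpl(ctx) // construct only
	if err != nil {
		log.Fatal(err)
	}

	// Dial the index service first; node.Init registers against it and
	// loads the node's config from the returned InitParams.
	client := grpcindexserviceclient.NewClient(indexnode.Params.ServiceAddress)
	if err := client.Init(); err != nil {
		log.Fatal(err)
	}
	node.SetIndexServiceClient(client)

	if err := node.Init(); err != nil { // register, connect MinIO, go HEALTHY
		log.Fatal(err)
	}
	if err := node.Start(); err != nil { // start the task scheduler
		log.Fatal(err)
	}
	defer node.Stop()
}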
@@ -1,25 +1,28 @@
 package indexnode
 
 import (
-	"net"
-	"os"
+	"bytes"
+	"log"
 	"strconv"
 
+	"github.com/spf13/cast"
+	"github.com/spf13/viper"
+	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+
 	"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
 )
 
+const (
+	StartParamsKey = "START_PARAMS"
+)
+
 type ParamTable struct {
 	paramtable.BaseTable
 
-	NodeAddress string
-	NodeIP      string
-	NodePort    int
+	IP      string
+	Address string
+	Port    int
+
 	ServiceAddress string
 	ServicePort    int
 
 	NodeID int64
 
 	MasterAddress string
@@ -38,12 +41,11 @@ var Params ParamTable
 
 func (pt *ParamTable) Init() {
 	pt.BaseTable.Init()
-	pt.initAddress()
-	pt.initPort()
-	pt.initIndexServerAddress()
-	pt.initIndexServerPort()
+	pt.initParams()
+}
+
+func (pt *ParamTable) initParams() {
 	pt.initEtcdAddress()
-	pt.initMasterAddress()
 	pt.initMetaRootPath()
 	pt.initMinIOAddress()
 	pt.initMinIOAccessKeyID()
@@ -52,69 +54,53 @@ func (pt *ParamTable) Init() {
 	pt.initMinioBucketName()
 }
 
-func (pt *ParamTable) initAddress() {
-	addr, err := pt.Load("indexServer.address")
-	if err != nil {
-		panic(err)
-	}
-
-	hostName, _ := net.LookupHost(addr)
-	if len(hostName) <= 0 {
-		if ip := net.ParseIP(addr); ip == nil {
-			panic("invalid ip indexBuilder.address")
-		}
-	}
-
-	port, err := pt.Load("indexServer.port")
-	if err != nil {
-		panic(err)
-	}
-	_, err = strconv.Atoi(port)
-	if err != nil {
-		panic(err)
-	}
-
-	pt.Address = addr + ":" + port
-}
-
-func (pt *ParamTable) initPort() {
-	pt.Port = pt.ParseInt("indexServer.port")
-}
-
-func (pt *ParamTable) initIndexServerAddress() {
-	//TODO: save IndexService address in paramtable kv?
-	serviceAddr := os.Getenv("INDEX_SERVICE_ADDRESS")
-	if serviceAddr == "" {
-		addr, err := pt.Load("indexServer.address")
-		if err != nil {
-			panic(err)
-		}
-
-		hostName, _ := net.LookupHost(addr)
-		if len(hostName) <= 0 {
-			if ip := net.ParseIP(addr); ip == nil {
-				panic("invalid ip indexServer.address")
-			}
-		}
-
-		port, err := pt.Load("indexServer.port")
-		if err != nil {
-			panic(err)
-		}
-		_, err = strconv.Atoi(port)
-		if err != nil {
-			panic(err)
-		}
-
-		pt.ServiceAddress = addr + ":" + port
-		return
-	}
-
-	pt.ServiceAddress = serviceAddr
-}
-
-func (pt ParamTable) initIndexServerPort() {
-	pt.ServicePort = pt.ParseInt("indexServer.port")
-}
+func (pt *ParamTable) LoadConfigFromInitParams(initParams *internalpb2.InitParams) error {
+	pt.NodeID = initParams.NodeID
+
+	config := viper.New()
+	config.SetConfigType("yaml")
+	for _, pair := range initParams.StartParams {
+		if pair.Key == StartParamsKey {
+			err := config.ReadConfig(bytes.NewBuffer([]byte(pair.Value)))
+			if err != nil {
+				return err
+			}
+			break
+		}
+	}
+
+	for _, key := range config.AllKeys() {
+		val := config.Get(key)
+		str, err := cast.ToStringE(val)
+		if err != nil {
+			switch val := val.(type) {
+			case []interface{}:
+				str = str[:0]
+				for _, v := range val {
+					ss, err := cast.ToStringE(v)
+					if err != nil {
+						log.Panic(err)
+					}
+					if len(str) == 0 {
+						str = ss
+					} else {
+						str = str + "," + ss
+					}
+				}
+			default:
+				log.Panicf("undefined config type, key=%s", key)
+			}
+		}
+		err = pt.Save(key, str)
+		if err != nil {
+			panic(err)
+		}
+	}
+	pt.initParams()
+	return nil
+}
 
 func (pt *ParamTable) initEtcdAddress() {
@@ -137,14 +123,6 @@ func (pt *ParamTable) initMetaRootPath() {
 	pt.MetaRootPath = rootPath + "/" + subPath
 }
 
-func (pt *ParamTable) initMasterAddress() {
-	ret, err := pt.Load("_MasterAddress")
-	if err != nil {
-		panic(err)
-	}
-	pt.MasterAddress = ret
-}
-
 func (pt *ParamTable) initMinIOAddress() {
 	ret, err := pt.Load("_MinioAddress")
 	if err != nil {
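
LoadConfigFromInitParams above expects the entire YAML config to arrive as one key/value pair keyed START_PARAMS, which viper then flattens into dotted keys. A minimal round-trip sketch of that convention; the YAML content is made up for illustration:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/spf13/viper"
)

func main() {
	// Producer side (the service): ship the config as a single YAML blob.
	yamlBlob := "minio:\n  address: localhost\n  port: 9000\n"
	startParams := map[string]string{"START_PARAMS": yamlBlob}

	// Consumer side (the node): parse the blob and read flattened keys
	// such as "minio.address".
	config := viper.New()
	config.SetConfigType("yaml")
	if err := config.ReadConfig(bytes.NewBufferString(startParams["START_PARAMS"])); err != nil {
		log.Fatal(err)
	}
	for _, key := range config.AllKeys() {
		fmt.Println(key, "=", config.Get(key))
	}
}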
@@ -6,6 +6,8 @@ import (
 	"sync"
 	"time"
 
+	"go.etcd.io/etcd/clientv3"
+
 	"github.com/zilliztech/milvus-distributed/internal/errors"
 	"github.com/zilliztech/milvus-distributed/internal/kv"
 	etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
@@ -16,18 +18,16 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 	"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
-
-	"go.etcd.io/etcd/clientv3"
 )
 
 const (
 	reqTimeoutInterval = time.Second * 10
 )
 
-type IndexService struct {
+type ServiceImpl struct {
 	nodeClients *PriorityQueue
 	nodeStates  map[UniqueID]*internalpb2.ComponentStates
-	state       internalpb2.StateCode
+	stateCode   internalpb2.StateCode
 
 	ID UniqueID
 
@@ -53,14 +53,18 @@ type IndexService struct {
 type UniqueID = typeutil.UniqueID
 type Timestamp = typeutil.Timestamp
 
-func CreateIndexService(ctx context.Context) (*IndexService, error) {
+func NewServiceImpl(ctx context.Context) (*ServiceImpl, error) {
 	ctx1, cancel := context.WithCancel(ctx)
-	i := &IndexService{
+	i := &ServiceImpl{
 		loopCtx:     ctx1,
 		loopCancel:  cancel,
 		nodeClients: &PriorityQueue{},
 	}
 
+	return i, nil
+}
+
+func (i *ServiceImpl) Init() error {
 	etcdAddress := Params.EtcdAddress
 	log.Println("etcd address = ", etcdAddress)
 	connectEtcdFn := func() error {
@@ -78,19 +82,19 @@ func CreateIndexService(ctx context.Context) (*IndexService, error) {
 	}
 	err := retry.Retry(10, time.Millisecond*200, connectEtcdFn)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	//init idAllocator
 	kvRootPath := Params.KvRootPath
 	i.idAllocator = NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "index_gid"))
 	if err := i.idAllocator.Initialize(); err != nil {
-		return nil, err
+		return err
 	}
 
 	i.ID, err = i.idAllocator.AllocOne()
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	connectMinIOFn := func() error {
@@ -111,33 +115,29 @@ func CreateIndexService(ctx context.Context) (*IndexService, error) {
 	}
 	err = retry.Retry(10, time.Millisecond*200, connectMinIOFn)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	i.sched, err = NewTaskScheduler(i.loopCtx, i.idAllocator, i.kv, i.metaTable)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	return i, nil
-}
-
-func (i *IndexService) Init() error {
+	i.UpdateStateCode(internalpb2.StateCode_HEALTHY)
 	return nil
 }
 
-func (i *IndexService) Start() error {
+func (i *ServiceImpl) Start() error {
 	i.sched.Start()
 	// Start callbacks
 	for _, cb := range i.startCallbacks {
 		cb()
 	}
-	log.Print("IndexService closed.")
+	log.Print("ServiceImpl start")
 	return nil
 }
 
-func (i *IndexService) Stop() error {
+func (i *ServiceImpl) Stop() error {
 	i.loopCancel()
 	i.sched.Close()
 	for _, cb := range i.closeCallbacks {
@@ -146,12 +146,16 @@ func (i *ServiceImpl) Stop() error {
 	return nil
 }
 
-func (i *IndexService) GetComponentStates() (*internalpb2.ComponentStates, error) {
+func (i *ServiceImpl) UpdateStateCode(code internalpb2.StateCode) {
+	i.stateCode = code
+}
+
+func (i *ServiceImpl) GetComponentStates() (*internalpb2.ComponentStates, error) {
 
 	stateInfo := &internalpb2.ComponentInfo{
 		NodeID:    i.ID,
-		Role:      "IndexService",
-		StateCode: i.state,
+		Role:      "ServiceImpl",
+		StateCode: i.stateCode,
 	}
 
 	ret := &internalpb2.ComponentStates{
@@ -164,15 +168,15 @@ func (i *ServiceImpl) GetComponentStates() (*internalpb2.ComponentStates, error)
 	return ret, nil
 }
 
-func (i *IndexService) GetTimeTickChannel() (string, error) {
+func (i *ServiceImpl) GetTimeTickChannel() (string, error) {
 	return "", nil
 }
 
-func (i *IndexService) GetStatisticsChannel() (string, error) {
+func (i *ServiceImpl) GetStatisticsChannel() (string, error) {
 	return "", nil
 }
 
-func (i *IndexService) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
+func (i *ServiceImpl) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
 	ret := &indexpb.BuildIndexResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -221,7 +225,7 @@ func (i *IndexService) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.Buil
 	return ret, nil
 }
 
-func (i *IndexService) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
+func (i *ServiceImpl) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
 	var indexStates []*indexpb.IndexInfo
 	for _, indexID := range req.IndexIDs {
 		indexState, err := i.metaTable.GetIndexState(indexID)
@@ -239,7 +243,7 @@ func (i *IndexService) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb
 	return ret, nil
 }
 
-func (i *IndexService) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
+func (i *ServiceImpl) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
 	var indexPaths []*indexpb.IndexFilePathInfo
 
 	for _, indexID := range req.IndexIDs {
@@ -256,7 +260,7 @@ func (i *IndexService) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*i
 	return ret, nil
 }
 
-func (i *IndexService) NotifyBuildIndex(nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
+func (i *ServiceImpl) NotifyBuildIndex(nty *indexpb.BuildIndexNotification) (*commonpb.Status, error) {
 	ret := &commonpb.Status{
 		ErrorCode: commonpb.ErrorCode_SUCCESS,
 	}
@@ -10,13 +10,13 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
 )
 
-func (i *IndexService) removeNode(nodeID UniqueID) {
+func (i *ServiceImpl) removeNode(nodeID UniqueID) {
 	i.nodeLock.Lock()
 	defer i.nodeLock.Unlock()
 	i.nodeClients.Remove(nodeID)
 }
 
-func (i *IndexService) addNode(nodeID UniqueID, req *indexpb.RegisterNodeRequest) error {
+func (i *ServiceImpl) addNode(nodeID UniqueID, req *indexpb.RegisterNodeRequest) error {
 	i.nodeLock.Lock()
 	defer i.nodeLock.Unlock()
 
@@ -40,7 +40,7 @@ func (i *IndexService) addNode(nodeID UniqueID, req *indexpb.RegisterNodeRequest
 	return nil
 }
 
-func (i *IndexService) prepareNodeInitParams() []*commonpb.KeyValuePair {
+func (i *ServiceImpl) prepareNodeInitParams() []*commonpb.KeyValuePair {
 	var params []*commonpb.KeyValuePair
 	params = append(params, &commonpb.KeyValuePair{Key: "minio.address", Value: Params.MinIOAddress})
 	params = append(params, &commonpb.KeyValuePair{Key: "minio.accessKeyID", Value: Params.MinIOAccessKeyID})
@@ -50,7 +50,7 @@ func (i *IndexService) prepareNodeInitParams() []*commonpb.KeyValuePair {
 	return params
 }
 
-func (i *IndexService) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
+func (i *ServiceImpl) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error) {
 	ret := &indexpb.RegisterNodeResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@@ -59,7 +59,7 @@ func (i *IndexService) RegisterNode(req *indexpb.RegisterNodeRequest) (*indexpb.
 
 	nodeID, err := i.idAllocator.AllocOne()
 	if err != nil {
-		ret.Status.Reason = "IndexService:RegisterNode Failed to acquire NodeID"
+		ret.Status.Reason = "ServiceImpl:RegisterNode Failed to acquire NodeID"
 		return ret, nil
 	}
@@ -247,7 +247,9 @@ func (c *Core) checkInit() error {
 	if c.DataNodeSegmentFlushCompletedChan == nil {
 		return errors.Errorf("DataNodeSegmentFlushCompletedChan is nil")
 	}
-	log.Printf("master node id = %d\n", Params.NodeID)
+	log.Printf("master node id = %d", Params.NodeID)
+	log.Printf("master dd channel name = %s", Params.DdChannel)
+	log.Printf("master time tick channel name = %s", Params.TimeTickChannel)
 	return nil
 }
 
@@ -607,6 +609,7 @@ func (c *Core) SetProxyService(s ProxyServiceInterface) error {
 		return err
 	}
 	Params.ProxyTimeTickChannel = rsp
+	log.Printf("proxy time tick channel name = %s", Params.ProxyTimeTickChannel)
 
 	c.InvalidateCollectionMetaCache = func(ts typeutil.Timestamp, dbName string, collectionName string) error {
 		err := s.InvalidateCollectionMetaCache(&proxypb.InvalidateCollMetaCacheRequest{
@@ -633,6 +636,8 @@ func (c *Core) SetDataService(s DataServiceInterface) error {
 		return err
 	}
 	Params.DataServiceSegmentChannel = rsp
+	log.Printf("data service segment channel name = %s", Params.DataServiceSegmentChannel)
+
 	c.GetBinlogFilePathsFromDataServiceReq = func(segID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]string, error) {
 		ts, err := c.tsoAllocator.Alloc(1)
 		if err != nil {
@@ -39,8 +39,9 @@ func (s *ServiceImpl) fillNodeInitParams() error {
 
 	getConfigContentByName := func(fileName string) []byte {
 		_, fpath, _, _ := runtime.Caller(0)
-		configFile := path.Dir(fpath) + "/../../../configs/" + fileName
+		configFile := path.Dir(fpath) + "/../../configs/" + fileName
 		_, err := os.Stat(configFile)
+		log.Printf("configFile = %s", configFile)
 		if os.IsNotExist(err) {
 			runPath, err := os.Getwd()
 			if err != nil {
@@ -43,30 +43,3 @@ func NewServiceImpl(ctx context.Context) (*ServiceImpl, error) {
 
 	return s, nil
 }
-
-// deprecated
-func CreateProxyService(ctx context.Context) (ProxyService, error) {
-	rand.Seed(time.Now().UnixNano())
-	ctx1, cancel := context.WithCancel(ctx)
-	s := &ServiceImpl{
-		ctx:    ctx1,
-		cancel: cancel,
-	}
-
-	s.allocator = NewNodeIDAllocator()
-	s.sched = NewTaskScheduler(ctx1)
-	s.nodeInfos = NewGlobalNodeInfoTable()
-	s.stateCode = internalpb2.StateCode_ABNORMAL
-	/*
-		s.state = &internalpb2.ComponentStates{
-			State: &internalpb2.ComponentInfo{
-				NodeID:    0,
-				Role:      "proxyservice",
-				StateCode: internalpb2.StateCode_INITIALIZING,
-			},
-			SubcomponentStates: nil,
-			Status:             &commonpb.Status{},
-		}
-	*/
-	return s, nil
-}
@@ -2,10 +2,15 @@ package funcutil
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"net"
+	"time"
 
 	"github.com/go-basic/ipv4"
+	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+	"github.com/zilliztech/milvus-distributed/internal/util/retry"
 )
 
 func CheckGrpcReady(ctx context.Context, targetCh chan error) {
@@ -30,3 +35,28 @@ func GetAvailablePort() int {
 func GetLocalIP() string {
 	return ipv4.LocalIP()
 }
+
+func WaitForComponentReady(service StateComponent, serviceName string, attempts int, sleep time.Duration) error {
+	checkFunc := func() error {
+		resp, err := service.GetComponentStates()
+		if err != nil {
+			return err
+		}
+
+		if resp.Status.ErrorCode != commonpb.ErrorCode_SUCCESS {
+			return errors.New(resp.Status.Reason)
+		}
+
+		if resp.State.StateCode != internalpb2.StateCode_HEALTHY {
+			return errors.New(serviceName + " is not healthy yet")
+		}
+
+		return nil
+	}
+	err := retry.Retry(attempts, sleep, checkFunc)
+	if err != nil {
+		errMsg := fmt.Sprintf("ProxyNode wait for %s ready failed", serviceName)
+		return errors.New(errMsg)
+	}
+	return nil
+}
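
WaitForComponentReady above polls GetComponentStates through retry.Retry until the target reports HEALTHY. A minimal sketch of driving it with a stubbed dependency; fakeService here is hypothetical test scaffolding, not part of the commit:

package funcutil_test

import (
	"testing"
	"time"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
	"github.com/zilliztech/milvus-distributed/internal/util/funcutil"
)

// fakeService becomes HEALTHY after a fixed number of polls.
type fakeService struct{ polls int }

func (f *fakeService) GetComponentStates() (*internalpb2.ComponentStates, error) {
	f.polls++
	code := internalpb2.StateCode_INITIALIZING
	if f.polls >= 3 {
		code = internalpb2.StateCode_HEALTHY
	}
	return &internalpb2.ComponentStates{
		State:  &internalpb2.ComponentInfo{StateCode: code},
		Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
	}, nil
}

func TestWaitForComponentReady(t *testing.T) {
	svc := &fakeService{}
	// 10 attempts, 10ms apart: succeeds once the stub turns HEALTHY.
	if err := funcutil.WaitForComponentReady(svc, "FakeService", 10, 10*time.Millisecond); err != nil {
		t.Fatal(err)
	}
}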
7
internal/util/funcutil/interface.go
Normal file
@@ -0,0 +1,7 @@
+package funcutil
+
+import "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
+
+type StateComponent interface {
+	GetComponentStates() (*internalpb2.ComponentStates, error)
+}
@@ -18,7 +18,7 @@ go test -race -cover "${MILVUS_DIR}/kv/..." -failfast
 # TODO: remove to distributed
 #go test -race -cover "${MILVUS_DIR}/proxynode/..." -failfast
 #go test -race -cover "${MILVUS_DIR}/writenode/..." -failfast
-#go test -race -cover "${MILVUS_DIR}/datanode/..." -failfast
+go test -race -cover "${MILVUS_DIR}/datanode/..." -failfast
 #go test -race -cover "${MILVUS_DIR}/master/..." -failfast
 #go test -race -cover "${MILVUS_DIR}/indexnode/..." -failfast
 #go test -race -cover "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/querynode/..." "${MILVUS_DIR}/storage" "${MILVUS_DIR}/util/..." -failfast
@@ -29,3 +29,4 @@ go test -race -cover "${MILVUS_DIR}/msgstream/..." -failfast
 
 go test -race -cover -v "${MILVUS_DIR}/masterservice" "${MILVUS_DIR}/distributed/masterservice" -failfast
 #go test -race -cover "${MILVUS_DIR}/kv/..." "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." -failfast
+go test -race -cover "${MILVUS_DIR}/dataservice/..." -failfast