// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

package rootcoord

import (
    "context"
    "encoding/json"
    "fmt"
    "math/rand"
    "strconv"
    "sync"
    "sync/atomic"
    "syscall"
    "time"

    "github.com/milvus-io/milvus/internal/common"

    "github.com/golang/protobuf/proto"
    "github.com/milvus-io/milvus/internal/allocator"
    "github.com/milvus-io/milvus/internal/kv"
    etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
    "github.com/milvus-io/milvus/internal/log"
    "github.com/milvus-io/milvus/internal/metrics"
    ms "github.com/milvus-io/milvus/internal/msgstream"
    "github.com/milvus-io/milvus/internal/proto/commonpb"
    "github.com/milvus-io/milvus/internal/proto/datapb"
    "github.com/milvus-io/milvus/internal/proto/etcdpb"
    "github.com/milvus-io/milvus/internal/proto/indexpb"
    "github.com/milvus-io/milvus/internal/proto/internalpb"
    "github.com/milvus-io/milvus/internal/proto/milvuspb"
    "github.com/milvus-io/milvus/internal/proto/proxypb"
    "github.com/milvus-io/milvus/internal/proto/querypb"
    "github.com/milvus-io/milvus/internal/proto/rootcoordpb"
    "github.com/milvus-io/milvus/internal/proto/schemapb"
    "github.com/milvus-io/milvus/internal/tso"
    "github.com/milvus-io/milvus/internal/types"
    "github.com/milvus-io/milvus/internal/util/metricsinfo"
    "github.com/milvus-io/milvus/internal/util/retry"
    "github.com/milvus-io/milvus/internal/util/sessionutil"
    "github.com/milvus-io/milvus/internal/util/trace"
    "github.com/milvus-io/milvus/internal/util/tsoutil"
    "github.com/milvus-io/milvus/internal/util/typeutil"
    clientv3 "go.etcd.io/etcd/client/v3"
    "go.uber.org/zap"
)

// ------------------ struct -----------------------

// DdOperation is used to save a ddMsg into etcd
type DdOperation struct {
    Body []byte `json:"body"`
    Type string `json:"type"`
}

const (
    // MetricRequestsTotal used to count the number of total requests
    MetricRequestsTotal = "total"

    // MetricRequestsSuccess used to count the number of successful requests
    MetricRequestsSuccess = "success"
)

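// metricProxy returns the metric label used to identify the proxy node with the given server ID.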
func metricProxy(v int64) string {
    return fmt.Sprintf("client_%d", v)
}

// Core is the root coordinator core
type Core struct {
    MetaTable *MetaTable
    //id allocator
    IDAllocator       func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error)
    IDAllocatorUpdate func() error

    //tso allocator
    TSOAllocator       func(count uint32) (typeutil.Timestamp, error)
    TSOAllocatorUpdate func() error

    //inner members
    ctx     context.Context
    cancel  context.CancelFunc
    wg      sync.WaitGroup
    etcdCli *clientv3.Client
    kvBase  kv.TxnKV //*etcdkv.EtcdKV

    //DDL lock
    ddlLock sync.Mutex

    kvBaseCreate func(root string) (kv.TxnKV, error)

    //setMsgStreams, send time tick into dd channel and time tick channel
    SendTimeTick func(t typeutil.Timestamp, reason string) error

    //setMsgStreams, send create collection into dd channel
    //returns corresponding message id for each channel
    SendDdCreateCollectionReq func(ctx context.Context, req *internalpb.CreateCollectionRequest, channelNames []string) (map[string][]byte, error)

    //setMsgStreams, send drop collection into dd channel, and notify the proxy to delete this collection
    SendDdDropCollectionReq func(ctx context.Context, req *internalpb.DropCollectionRequest, channelNames []string) error

    //setMsgStreams, send create partition into dd channel
    SendDdCreatePartitionReq func(ctx context.Context, req *internalpb.CreatePartitionRequest, channelNames []string) error

    //setMsgStreams, send drop partition into dd channel
    SendDdDropPartitionReq func(ctx context.Context, req *internalpb.DropPartitionRequest, channelNames []string) error

    //get binlog file paths from data service
    CallGetBinlogFilePathsService func(ctx context.Context, segID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]string, error)
    CallGetNumRowsService         func(ctx context.Context, segID typeutil.UniqueID, isFromFlushedChan bool) (int64, error)
    CallGetFlushedSegmentsService func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error)

    //call index builder's client to build index, return build id
    CallBuildIndexService func(ctx context.Context, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo) (typeutil.UniqueID, error)
    CallDropIndexService  func(ctx context.Context, indexID typeutil.UniqueID) error

    NewProxyClient func(sess *sessionutil.Session) (types.Proxy, error)

    //query service interface, notify query service to release collection
    CallReleaseCollectionService func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID) error
    CallReleasePartitionService  func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID, partitionIDs []typeutil.UniqueID) error

    CallWatchChannels func(ctx context.Context, collectionID int64, channelNames []string) error

    // dml channels used for insert
    dmlChannels *dmlChannels

    // delta channels used for delete
    deltaChannels *dmlChannels

    //Proxy manager
    proxyManager *proxyManager

    // proxy clients
    proxyClientManager *proxyClientManager

    // metrics cache manager
    metricsCacheManager *metricsinfo.MetricsCacheManager

    // channel timetick
    chanTimeTick *timetickSync

    //time tick loop
    lastTimeTick typeutil.Timestamp

    //states code
    stateCode atomic.Value

    //call once
    initOnce  sync.Once
    startOnce sync.Once
    //isInit atomic.Value

    session *sessionutil.Session

    msFactory ms.Factory
}

// --------------------- function --------------------------

// NewCore creates a rootcoord Core instance
func NewCore(c context.Context, factory ms.Factory) (*Core, error) {
    ctx, cancel := context.WithCancel(c)
    rand.Seed(time.Now().UnixNano())
    core := &Core{
        ctx:       ctx,
        cancel:    cancel,
        ddlLock:   sync.Mutex{},
        msFactory: factory,
    }
    core.UpdateStateCode(internalpb.StateCode_Abnormal)
    return core, nil
}

// UpdateStateCode updates the state code
func (c *Core) UpdateStateCode(code internalpb.StateCode) {
    c.stateCode.Store(code)
}

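// checkHealthy returns the current state code and whether it equals StateCode_Healthy.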
func (c *Core) checkHealthy() (internalpb.StateCode, bool) {
    code := c.stateCode.Load().(internalpb.StateCode)
    ok := code == internalpb.StateCode_Healthy
    return code, ok
}

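// failStatus builds a commonpb.Status carrying the given error code and reason.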
func failStatus(code commonpb.ErrorCode, reason string) *commonpb.Status {
    return &commonpb.Status{
        ErrorCode: code,
        Reason:    reason,
    }
}

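// succStatus builds a commonpb.Status with ErrorCode_Success and an empty reason.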
func succStatus() *commonpb.Status {
    return &commonpb.Status{
        ErrorCode: commonpb.ErrorCode_Success,
        Reason:    "",
    }
}

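// checkInit verifies that every required member and callback of Core has been set before the service starts.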
func (c *Core) checkInit() error {
    if c.MetaTable == nil {
        return fmt.Errorf("metaTable is nil")
    }
    if c.IDAllocator == nil {
        return fmt.Errorf("idAllocator is nil")
    }
    if c.IDAllocatorUpdate == nil {
        return fmt.Errorf("idAllocatorUpdate is nil")
    }
    if c.TSOAllocator == nil {
        return fmt.Errorf("tsoAllocator is nil")
    }
    if c.TSOAllocatorUpdate == nil {
        return fmt.Errorf("tsoAllocatorUpdate is nil")
    }
    if c.etcdCli == nil {
        return fmt.Errorf("etcdCli is nil")
    }
    if c.kvBase == nil {
        return fmt.Errorf("kvBase is nil")
    }
    if c.SendDdCreateCollectionReq == nil {
        return fmt.Errorf("sendDdCreateCollectionReq is nil")
    }
    if c.SendDdDropCollectionReq == nil {
        return fmt.Errorf("sendDdDropCollectionReq is nil")
    }
    if c.SendDdCreatePartitionReq == nil {
        return fmt.Errorf("sendDdCreatePartitionReq is nil")
    }
    if c.SendDdDropPartitionReq == nil {
        return fmt.Errorf("sendDdDropPartitionReq is nil")
    }
    if c.CallGetBinlogFilePathsService == nil {
        return fmt.Errorf("callGetBinlogFilePathsService is nil")
    }
    if c.CallGetNumRowsService == nil {
        return fmt.Errorf("callGetNumRowsService is nil")
    }
    if c.CallBuildIndexService == nil {
        return fmt.Errorf("callBuildIndexService is nil")
    }
    if c.CallDropIndexService == nil {
        return fmt.Errorf("callDropIndexService is nil")
    }
    if c.CallGetFlushedSegmentsService == nil {
        return fmt.Errorf("callGetFlushedSegmentsService is nil")
    }
    if c.CallWatchChannels == nil {
        return fmt.Errorf("callWatchChannels is nil")
    }
    if c.NewProxyClient == nil {
        return fmt.Errorf("newProxyClient is nil")
    }
    if c.CallReleaseCollectionService == nil {
        return fmt.Errorf("callReleaseCollectionService is nil")
    }
    if c.CallReleasePartitionService == nil {
        return fmt.Errorf("callReleasePartitionService is nil")
    }

    return nil
}

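// startTimeTickLoop allocates a timestamp on every tick and sends it as a time tick message
// until the core context is cancelled; the DDL lock keeps ticks from interleaving with DDL operations.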
func (c *Core) startTimeTickLoop() {
    defer c.wg.Done()
    ticker := time.NewTicker(time.Duration(Params.TimeTickInterval) * time.Millisecond)
    for {
        select {
        case <-c.ctx.Done():
            log.Debug("rootcoord context closed", zap.Error(c.ctx.Err()))
            return
        case <-ticker.C:
            c.ddlLock.Lock()
            if ts, err := c.TSOAllocator(1); err == nil {
                c.SendTimeTick(ts, "timetick loop")
            }
            c.ddlLock.Unlock()
        }
    }
}

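// tsLoop periodically refreshes the TSO and global ID allocators until the core context is cancelled.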
func (c *Core) tsLoop() {
    defer c.wg.Done()
    tsoTicker := time.NewTicker(tso.UpdateTimestampStep)
    defer tsoTicker.Stop()
    ctx, cancel := context.WithCancel(c.ctx)
    defer cancel()
    for {
        select {
        case <-tsoTicker.C:
            if err := c.TSOAllocatorUpdate(); err != nil {
                log.Warn("failed to update timestamp: ", zap.Error(err))
                continue
            }
            if err := c.IDAllocatorUpdate(); err != nil {
                log.Warn("failed to update id: ", zap.Error(err))
                continue
            }
        case <-ctx.Done():
            // Server is closed and it should return nil.
            log.Debug("tsLoop is closed")
            return
        }
    }
}

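// checkFlushedSegmentsLoop periodically triggers checkFlushedSegments until the core context is cancelled.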
func (c *Core) checkFlushedSegmentsLoop() {
    defer c.wg.Done()
    ticker := time.NewTicker(10 * time.Minute)
    for {
        select {
        case <-c.ctx.Done():
            log.Debug("RootCoord context done, exit check FlushedSegmentsLoop")
            return
        case <-ticker.C:
            log.Debug("check flushed segments")
            c.checkFlushedSegments(c.ctx)
        }
    }
}

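// checkFlushedSegments scans a snapshot of the meta table and, for every flushed segment that
// is missing an index declared on its collection, calls BuildIndex and records the result.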
func (c *Core) checkFlushedSegments(ctx context.Context) {
    collID2Meta, segID2IndexMeta, indexID2Meta := c.MetaTable.dupMeta()
    for _, collMeta := range collID2Meta {
        if len(collMeta.FieldIndexes) == 0 {
            continue
        }
        for _, partID := range collMeta.PartitionIDs {
            ctx2, cancel2 := context.WithTimeout(ctx, 3*time.Minute)
            segIDs, err := c.CallGetFlushedSegmentsService(ctx2, collMeta.ID, partID)
            if err != nil {
                log.Debug("failed to get flushed segments from data coord",
                    zap.Int64("collection id", collMeta.ID),
                    zap.Int64("partition id", partID),
                    zap.Error(err))
            } else {
                for _, segID := range segIDs {
                    indexInfos := []*etcdpb.FieldIndexInfo{}
                    indexMeta, ok := segID2IndexMeta[segID]
                    if !ok {
                        indexInfos = append(indexInfos, collMeta.FieldIndexes...)
                    } else {
                        for _, idx := range collMeta.FieldIndexes {
                            if _, ok := indexMeta[idx.IndexID]; !ok {
                                indexInfos = append(indexInfos, idx)
                            }
                        }
                    }
                    for _, idxInfo := range indexInfos {
                        /* #nosec G601 */
                        field, err := GetFieldSchemaByID(&collMeta, idxInfo.FiledID)
                        if err != nil {
                            log.Debug("GetFieldSchemaByID",
                                zap.Any("collection_meta", collMeta),
                                zap.Int64("field id", idxInfo.FiledID))
                            continue
                        }
                        indexMeta, ok := indexID2Meta[idxInfo.IndexID]
                        if !ok {
                            log.Debug("index meta does not exist", zap.Int64("index_id", idxInfo.IndexID))
                            continue
                        }
                        info := etcdpb.SegmentIndexInfo{
                            CollectionID: collMeta.ID,
                            PartitionID:  partID,
                            SegmentID:    segID,
                            FieldID:      idxInfo.FiledID,
                            IndexID:      idxInfo.IndexID,
                            EnableIndex:  false,
                        }
                        log.Debug("building index by background checker",
                            zap.Int64("segment_id", segID),
                            zap.Int64("index_id", indexMeta.IndexID),
                            zap.Int64("collection_id", collMeta.ID))
                        info.BuildID, err = c.BuildIndex(ctx2, segID, field, &indexMeta, false)
                        if err != nil {
                            log.Debug("build index failed",
                                zap.Int64("segment_id", segID),
                                zap.Int64("field_id", field.FieldID),
                                zap.Int64("index_id", indexMeta.IndexID))
                            continue
                        }
                        if info.BuildID != 0 {
                            info.EnableIndex = true
                        }
                        if err := c.MetaTable.AddIndex(&info); err != nil {
                            log.Debug("Add index into meta table failed",
                                zap.Int64("collection_id", collMeta.ID),
                                zap.Int64("index_id", info.IndexID),
                                zap.Int64("build_id", info.BuildID),
                                zap.Error(err))
                        }
                    }
                }
            }
            cancel2()
        }
    }
}

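// getSegments returns a map from flushed segment ID to partition ID for the given collection.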
func (c *Core) getSegments(ctx context.Context, collID typeutil.UniqueID) (map[typeutil.UniqueID]typeutil.UniqueID, error) {
    collMeta, err := c.MetaTable.GetCollectionByID(collID, 0)
    if err != nil {
        return nil, err
    }
    segID2PartID := map[typeutil.UniqueID]typeutil.UniqueID{}
    for _, partID := range collMeta.PartitionIDs {
        if seg, err := c.CallGetFlushedSegmentsService(ctx, collID, partID); err == nil {
            for _, s := range seg {
                segID2PartID[s] = partID
            }
        } else {
            log.Debug("failed to get flushed segments from data coord", zap.Int64("collection_id", collID), zap.Int64("partition_id", partID), zap.Error(err))
            return nil, err
        }
    }

    return segID2PartID, nil
}

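// setDdMsgSendFlag records in etcd whether the latest DD message has been sent.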
func (c *Core) setDdMsgSendFlag(b bool) error {
    flag, err := c.MetaTable.txn.Load(DDMsgSendPrefix)
    if err != nil {
        return err
    }

    if (b && flag == "true") || (!b && flag == "false") {
        log.Debug("DdMsg send flag need not change", zap.String("flag", flag))
        return nil
    }

    err = c.MetaTable.txn.Save(DDMsgSendPrefix, strconv.FormatBool(b))
    return err
}

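// setMsgStreams creates the time tick producer and installs the closures that publish time tick
// and DD (create/drop collection and partition) messages into the DML channels.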
func (c *Core) setMsgStreams() error {
    if Params.PulsarAddress == "" {
        return fmt.Errorf("pulsarAddress is empty")
    }
    if Params.MsgChannelSubName == "" {
        return fmt.Errorf("msgChannelSubName is empty")
    }

    // rootcoord time tick channel
    if Params.TimeTickChannel == "" {
        return fmt.Errorf("timeTickChannel is empty")
    }
    timeTickStream, _ := c.msFactory.NewMsgStream(c.ctx)
    timeTickStream.AsProducer([]string{Params.TimeTickChannel})
    log.Debug("rootcoord AsProducer: " + Params.TimeTickChannel)

    c.SendTimeTick = func(t typeutil.Timestamp, reason string) error {
        msgPack := ms.MsgPack{}
        baseMsg := ms.BaseMsg{
            BeginTimestamp: t,
            EndTimestamp:   t,
            HashValues:     []uint32{0},
        }
        timeTickResult := internalpb.TimeTickMsg{
            Base: &commonpb.MsgBase{
                MsgType:   commonpb.MsgType_TimeTick,
                MsgID:     0,
                Timestamp: t,
                SourceID:  c.session.ServerID,
            },
        }
        timeTickMsg := &ms.TimeTickMsg{
            BaseMsg:     baseMsg,
            TimeTickMsg: timeTickResult,
        }
        msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
        if err := timeTickStream.Broadcast(&msgPack); err != nil {
            return err
        }
        metrics.RootCoordDDChannelTimeTick.Set(float64(tsoutil.Mod24H(t)))

        //c.dmlChannels.BroadcastAll(&msgPack)
        pc := c.dmlChannels.ListPhysicalChannels()
        pt := make([]uint64, len(pc))
        for i := 0; i < len(pt); i++ {
            pt[i] = t
        }
        ttMsg := internalpb.ChannelTimeTickMsg{
            Base: &commonpb.MsgBase{
                MsgType:   commonpb.MsgType_TimeTick,
                MsgID:     0, //TODO
                Timestamp: t,
                SourceID:  c.session.ServerID,
            },
            ChannelNames:     pc,
            Timestamps:       pt,
            DefaultTimestamp: t,
        }
        //log.Debug("update timetick",
        //	zap.Any("DefaultTs", t),
        //	zap.Any("sourceID", c.session.ServerID),
        //	zap.Any("reason", reason))
        return c.chanTimeTick.UpdateTimeTick(&ttMsg, reason)
    }

    c.SendDdCreateCollectionReq = func(ctx context.Context, req *internalpb.CreateCollectionRequest, channelNames []string) (map[string][]byte, error) {
        msgPack := ms.MsgPack{}
        baseMsg := ms.BaseMsg{
            Ctx:            ctx,
            BeginTimestamp: req.Base.Timestamp,
            EndTimestamp:   req.Base.Timestamp,
            HashValues:     []uint32{0},
        }
        msg := &ms.CreateCollectionMsg{
            BaseMsg:                 baseMsg,
            CreateCollectionRequest: *req,
        }
        msgPack.Msgs = append(msgPack.Msgs, msg)
        return c.dmlChannels.BroadcastMark(channelNames, &msgPack)
    }

    c.SendDdDropCollectionReq = func(ctx context.Context, req *internalpb.DropCollectionRequest, channelNames []string) error {
        msgPack := ms.MsgPack{}
        baseMsg := ms.BaseMsg{
            Ctx:            ctx,
            BeginTimestamp: req.Base.Timestamp,
            EndTimestamp:   req.Base.Timestamp,
            HashValues:     []uint32{0},
        }
        msg := &ms.DropCollectionMsg{
            BaseMsg:               baseMsg,
            DropCollectionRequest: *req,
        }
        msgPack.Msgs = append(msgPack.Msgs, msg)
        return c.dmlChannels.Broadcast(channelNames, &msgPack)
    }

    c.SendDdCreatePartitionReq = func(ctx context.Context, req *internalpb.CreatePartitionRequest, channelNames []string) error {
        msgPack := ms.MsgPack{}
        baseMsg := ms.BaseMsg{
            Ctx:            ctx,
            BeginTimestamp: req.Base.Timestamp,
            EndTimestamp:   req.Base.Timestamp,
            HashValues:     []uint32{0},
        }
        msg := &ms.CreatePartitionMsg{
            BaseMsg:                baseMsg,
            CreatePartitionRequest: *req,
        }
        msgPack.Msgs = append(msgPack.Msgs, msg)
        return c.dmlChannels.Broadcast(channelNames, &msgPack)
    }

    c.SendDdDropPartitionReq = func(ctx context.Context, req *internalpb.DropPartitionRequest, channelNames []string) error {
        msgPack := ms.MsgPack{}
        baseMsg := ms.BaseMsg{
            Ctx:            ctx,
            BeginTimestamp: req.Base.Timestamp,
            EndTimestamp:   req.Base.Timestamp,
            HashValues:     []uint32{0},
        }
        msg := &ms.DropPartitionMsg{
            BaseMsg:              baseMsg,
            DropPartitionRequest: *req,
        }
        msgPack.Msgs = append(msgPack.Msgs, msg)
        return c.dmlChannels.Broadcast(channelNames, &msgPack)
    }

    return nil
}

// SetNewProxyClient sets the function used to create proxy clients
func (c *Core) SetNewProxyClient(f func(sess *sessionutil.Session) (types.Proxy, error)) {
    if c.NewProxyClient == nil {
        c.NewProxyClient = f
    } else {
        log.Debug("NewProxyClient has already set")
    }
}

// SetDataCoord sets up the DataCoord client and the callbacks backed by it
func (c *Core) SetDataCoord(ctx context.Context, s types.DataCoord) error {
    initCh := make(chan struct{})
    go func() {
        for {
            if err := s.Init(); err == nil {
                if err := s.Start(); err == nil {
                    close(initCh)
                    log.Debug("RootCoord connected to DataCoord")
                    return
                }
            }
            log.Debug("Retrying RootCoord connection to DataCoord")
        }
    }()
    c.CallGetBinlogFilePathsService = func(ctx context.Context, segID typeutil.UniqueID, fieldID typeutil.UniqueID) (retFiles []string, retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("get bin log file paths panic, msg = %v", err)
            }
        }()
        <-initCh //wait connect to data coord
        ts, err := c.TSOAllocator(1)
        if err != nil {
            return nil, err
        }
        binlog, err := s.GetInsertBinlogPaths(ctx, &datapb.GetInsertBinlogPathsRequest{
            Base: &commonpb.MsgBase{
                MsgType:   0, //TODO, msg type
                MsgID:     0,
                Timestamp: ts,
                SourceID:  c.session.ServerID,
            },
            SegmentID: segID,
        })
        if err != nil {
            return nil, err
        }
        if binlog.Status.ErrorCode != commonpb.ErrorCode_Success {
            return nil, fmt.Errorf("getInsertBinlogPaths from data service failed, error = %s", binlog.Status.Reason)
        }
        for i := range binlog.FieldIDs {
            if binlog.FieldIDs[i] == fieldID {
                return binlog.Paths[i].Values, nil
            }
        }
        return nil, fmt.Errorf("binlog file does not exist, segment id = %d, field id = %d", segID, fieldID)
    }

    c.CallGetNumRowsService = func(ctx context.Context, segID typeutil.UniqueID, isFromFlushedChan bool) (retRows int64, retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("get num rows panic, msg = %v", err)
            }
        }()
        <-initCh
        ts, err := c.TSOAllocator(1)
        if err != nil {
            return retRows, err
        }
        segInfo, err := s.GetSegmentInfo(ctx, &datapb.GetSegmentInfoRequest{
            Base: &commonpb.MsgBase{
                MsgType:   0, //TODO, msg type
                MsgID:     0,
                Timestamp: ts,
                SourceID:  c.session.ServerID,
            },
            SegmentIDs: []typeutil.UniqueID{segID},
        })
        if err != nil {
            return retRows, err
        }
        if segInfo.Status.ErrorCode != commonpb.ErrorCode_Success {
            return retRows, fmt.Errorf("getSegmentInfo from data service failed, error = %s", segInfo.Status.Reason)
        }
        if len(segInfo.Infos) != 1 {
            log.Debug("get segment info empty")
            return retRows, nil
        }
        if !isFromFlushedChan && segInfo.Infos[0].State != commonpb.SegmentState_Flushed {
            log.Debug("segment id not flushed", zap.Int64("segment id", segID))
            return retRows, nil
        }
        return segInfo.Infos[0].NumOfRows, nil
    }

    c.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) (retSegIDs []typeutil.UniqueID, retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("get flushed segments from data coord panic, msg = %v", err)
            }
        }()
        <-initCh
        req := &datapb.GetFlushedSegmentsRequest{
            Base: &commonpb.MsgBase{
                MsgType:   0, //TODO, msg type
                MsgID:     0,
                Timestamp: 0,
                SourceID:  c.session.ServerID,
            },
            CollectionID: collID,
            PartitionID:  partID,
        }
        rsp, err := s.GetFlushedSegments(ctx, req)
        if err != nil {
            return retSegIDs, err
        }
        if rsp.Status.ErrorCode != commonpb.ErrorCode_Success {
            return retSegIDs, fmt.Errorf("get flushed segments from data coord failed, reason = %s", rsp.Status.Reason)
        }
        return rsp.Segments, nil
    }

    c.CallWatchChannels = func(ctx context.Context, collectionID int64, channelNames []string) (retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("watch channels panic, msg = %v", err)
            }
        }()
        <-initCh
        req := &datapb.WatchChannelsRequest{
            CollectionID: collectionID,
            ChannelNames: channelNames,
        }
        rsp, err := s.WatchChannels(ctx, req)
        if err != nil {
            return err
        }
        if rsp.Status.ErrorCode != commonpb.ErrorCode_Success {
            return fmt.Errorf("data coord watch channels failed, reason = %s", rsp.Status.Reason)
        }
        return nil
    }
    return nil
}

// SetIndexCoord sets up the IndexCoord client and the callbacks backed by it
func (c *Core) SetIndexCoord(s types.IndexCoord) error {
    initCh := make(chan struct{})
    go func() {
        for {
            if err := s.Init(); err == nil {
                if err := s.Start(); err == nil {
                    close(initCh)
                    log.Debug("RootCoord connected to IndexCoord")
                    return
                }
            }
            log.Debug("Retrying RootCoord connection to IndexCoord")
        }
    }()

    c.CallBuildIndexService = func(ctx context.Context, binlog []string, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo) (retID typeutil.UniqueID, retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("build index panic, msg = %v", err)
            }
        }()
        <-initCh
        rsp, err := s.BuildIndex(ctx, &indexpb.BuildIndexRequest{
            DataPaths:   binlog,
            TypeParams:  field.TypeParams,
            IndexParams: idxInfo.IndexParams,
            IndexID:     idxInfo.IndexID,
            IndexName:   idxInfo.IndexName,
        })
        if err != nil {
            return retID, err
        }
        if rsp.Status.ErrorCode != commonpb.ErrorCode_Success {
            return retID, fmt.Errorf("buildIndex from index service failed, error = %s", rsp.Status.Reason)
        }
        return rsp.IndexBuildID, nil
    }

    c.CallDropIndexService = func(ctx context.Context, indexID typeutil.UniqueID) (retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("drop index from index service panic, msg = %v", err)
            }
        }()
        <-initCh
        rsp, err := s.DropIndex(ctx, &indexpb.DropIndexRequest{
            IndexID: indexID,
        })
        if err != nil {
            return err
        }
        if rsp.ErrorCode != commonpb.ErrorCode_Success {
            return fmt.Errorf(rsp.Reason)
        }
        return nil
    }

    return nil
}

// SetQueryCoord sets up the QueryCoord client and the callbacks backed by it
func (c *Core) SetQueryCoord(s types.QueryCoord) error {
    initCh := make(chan struct{})
    go func() {
        for {
            if err := s.Init(); err == nil {
                if err := s.Start(); err == nil {
                    close(initCh)
                    log.Debug("RootCoord connected to QueryCoord")
                    return
                }
            }
            log.Debug("Retrying RootCoord connection to QueryCoord")
        }
    }()
    c.CallReleaseCollectionService = func(ctx context.Context, ts typeutil.Timestamp, dbID typeutil.UniqueID, collectionID typeutil.UniqueID) (retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("release collection from query service panic, msg = %v", err)
            }
        }()
        <-initCh
        req := &querypb.ReleaseCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgType:   commonpb.MsgType_ReleaseCollection,
                MsgID:     0, //TODO, msg ID
                Timestamp: ts,
                SourceID:  c.session.ServerID,
            },
            DbID:         dbID,
            CollectionID: collectionID,
        }
        rsp, err := s.ReleaseCollection(ctx, req)
        if err != nil {
            return err
        }
        if rsp.ErrorCode != commonpb.ErrorCode_Success {
            return fmt.Errorf("releaseCollection from query service failed, error = %s", rsp.Reason)
        }
        return nil
    }
    c.CallReleasePartitionService = func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID, partitionIDs []typeutil.UniqueID) (retErr error) {
        defer func() {
            if err := recover(); err != nil {
                retErr = fmt.Errorf("release partition from query service panic, msg = %v", err)
            }
        }()
        <-initCh
        req := &querypb.ReleasePartitionsRequest{
            Base: &commonpb.MsgBase{
                MsgType:   commonpb.MsgType_ReleasePartitions,
                MsgID:     0, //TODO, msg ID
                Timestamp: ts,
                SourceID:  c.session.ServerID,
            },
            DbID:         dbID,
            CollectionID: collectionID,
            PartitionIDs: partitionIDs,
        }
        rsp, err := s.ReleasePartitions(ctx, req)
        if err != nil {
            return err
        }
        if rsp.ErrorCode != commonpb.ErrorCode_Success {
            return fmt.Errorf("releasePartitions from query service failed, error = %s", rsp.Reason)
        }
        return nil
    }
    return nil
}

// BuildIndex checks the segment row count and calls the build index service when needed
func (c *Core) BuildIndex(ctx context.Context, segID typeutil.UniqueID, field *schemapb.FieldSchema, idxInfo *etcdpb.IndexInfo, isFlush bool) (typeutil.UniqueID, error) {
    log.Debug("start build index", zap.String("index name", idxInfo.IndexName),
        zap.String("field name", field.Name), zap.Int64("segment id", segID))
    sp, ctx := trace.StartSpanFromContext(ctx)
    defer sp.Finish()
    if c.MetaTable.IsSegmentIndexed(segID, field, idxInfo.IndexParams) {
        return 0, nil
    }
    rows, err := c.CallGetNumRowsService(ctx, segID, isFlush)
    if err != nil {
        return 0, err
    }
    var bldID typeutil.UniqueID
    if rows < Params.MinSegmentSizeToEnableIndex {
        log.Debug("num of rows is less than MinSegmentSizeToEnableIndex", zap.Int64("num rows", rows))
    } else {
        binlogs, err := c.CallGetBinlogFilePathsService(ctx, segID, field.FieldID)
        if err != nil {
            return 0, err
        }
        bldID, err = c.CallBuildIndexService(ctx, binlogs, field, idxInfo)
        if err != nil {
            return 0, err
        }

        log.Debug("CallBuildIndex finished", zap.String("index name", idxInfo.IndexName),
            zap.String("field name", field.Name), zap.Int64("segment id", segID), zap.Int64("num rows", rows))
    }
    return bldID, nil
}

// Register registers rootcoord in etcd
func (c *Core) Register() error {
    c.session = sessionutil.NewSession(c.ctx, Params.MetaRootPath, Params.EtcdEndpoints)
    if c.session == nil {
        return fmt.Errorf("session is nil, the etcd client connection may have failed")
    }
    c.session.Init(typeutil.RootCoordRole, Params.Address, true)
    Params.SetLogger(typeutil.UniqueID(-1))
    return nil
}

// Init initializes the rootcoord service: etcd clients, meta table, ID/TSO allocators, DML/delta channels, proxy manager and msg streams
func (c *Core) Init() error {
    var initError error = nil
    if c.kvBaseCreate == nil {
        c.kvBaseCreate = func(root string) (kv.TxnKV, error) {
            return etcdkv.NewEtcdKV(Params.EtcdEndpoints, root)
        }
    }
    c.initOnce.Do(func() {
        connectEtcdFn := func() error {
            if c.etcdCli, initError = clientv3.New(clientv3.Config{Endpoints: Params.EtcdEndpoints, DialTimeout: 5 * time.Second}); initError != nil {
                log.Error("RootCoord failed to new Etcd client", zap.Any("reason", initError))
                return initError
            }
            if c.kvBase, initError = c.kvBaseCreate(Params.KvRootPath); initError != nil {
                log.Error("RootCoord failed to new EtcdKV", zap.Any("reason", initError))
                return initError
            }
            var metaKV kv.TxnKV
            metaKV, initError = c.kvBaseCreate(Params.MetaRootPath)
            if initError != nil {
                log.Error("RootCoord failed to new EtcdKV", zap.Any("reason", initError))
                return initError
            }
            var ss *suffixSnapshot
            if ss, initError = newSuffixSnapshot(metaKV, "_ts", Params.MetaRootPath, "snapshots"); initError != nil {
                log.Error("RootCoord failed to new suffixSnapshot", zap.Error(initError))
                return initError
            }
            if c.MetaTable, initError = NewMetaTable(metaKV, ss); initError != nil {
                log.Error("RootCoord failed to new MetaTable", zap.Any("reason", initError))
                return initError
            }

            return nil
        }
        log.Debug("RootCoord, Connecting to Etcd", zap.String("kv root", Params.KvRootPath), zap.String("meta root", Params.MetaRootPath))
        err := retry.Do(c.ctx, connectEtcdFn, retry.Attempts(300))
        if err != nil {
            return
        }

        log.Debug("RootCoord, Setting TSO and ID Allocator")
        kv, initError := tsoutil.NewTSOKVBase(Params.EtcdEndpoints, Params.KvRootPath, "gid")
        if initError != nil {
            return
        }
        idAllocator := allocator.NewGlobalIDAllocator("idTimestamp", kv)
        if initError = idAllocator.Initialize(); initError != nil {
            return
        }
        c.IDAllocator = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
            return idAllocator.Alloc(count)
        }
        c.IDAllocatorUpdate = func() error {
            return idAllocator.UpdateID()
        }

        kv, initError = tsoutil.NewTSOKVBase(Params.EtcdEndpoints, Params.KvRootPath, "tso")
        if initError != nil {
            return
        }
        tsoAllocator := tso.NewGlobalTSOAllocator("timestamp", kv)
        if initError = tsoAllocator.Initialize(); initError != nil {
            return
        }
        c.TSOAllocator = func(count uint32) (typeutil.Timestamp, error) {
            return tsoAllocator.Alloc(count)
        }
        c.TSOAllocatorUpdate = func() error {
            return tsoAllocator.UpdateTSO()
        }

        m := map[string]interface{}{
            "PulsarAddress":  Params.PulsarAddress,
            "ReceiveBufSize": 1024,
            "PulsarBufSize":  1024}
        if initError = c.msFactory.SetParams(m); initError != nil {
            return
        }

        // initialize dml channels used for insert
        c.dmlChannels = newDmlChannels(c, Params.DmlChannelName, Params.DmlChannelNum)

        // initialize delta channels used for delete, share Params.DmlChannelNum with dmlChannels
        c.deltaChannels = newDmlChannels(c, Params.DeltaChannelName, Params.DmlChannelNum)

        // recover physical channels for all collections
        chanMap := c.MetaTable.ListCollectionPhysicalChannels()
        for collID, chanNames := range chanMap {
            c.dmlChannels.AddProducerChannels(chanNames...)
            log.Debug("recover physical channels", zap.Int64("collID", collID), zap.Any("physical channels", chanNames))

            // TODO: convert physical channel name to delta channel name
            for _, chanName := range chanNames {
                deltaChanName, err := ConvertChannelName(chanName, Params.DmlChannelName, Params.DeltaChannelName)
                if err != nil {
                    log.Error("failed to convert dml channel name to delta channel name", zap.String("chanName", chanName))
                    return
                }
                c.deltaChannels.AddProducerChannels(deltaChanName)
                log.Debug("recover delta channels", zap.Int64("collID", collID), zap.String("deltaChanName", deltaChanName))
            }
        }

        c.chanTimeTick = newTimeTickSync(c)
        c.chanTimeTick.AddProxy(c.session)
        c.proxyClientManager = newProxyClientManager(c)

        log.Debug("RootCoord, set proxy manager")
        c.proxyManager, initError = newProxyManager(
            c.ctx,
            Params.EtcdEndpoints,
            c.chanTimeTick.GetProxy,
            c.proxyClientManager.GetProxyClients,
        )
        if initError != nil {
            return
        }
        c.proxyManager.AddSession(c.chanTimeTick.AddProxy, c.proxyClientManager.AddProxyClient)
        c.proxyManager.DelSession(c.chanTimeTick.DelProxy, c.proxyClientManager.DelProxyClient)

        c.metricsCacheManager = metricsinfo.NewMetricsCacheManager()

        initError = c.setMsgStreams()
        if initError != nil {
            return
        }
    })
    if initError != nil {
        log.Debug("RootCoord init error", zap.Error(initError))
    }
    log.Debug("RootCoord init done")
    return initError
}

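// reSendDdMsg replays the last persisted DD operation if it has not been marked as sent,
// invalidates the proxy meta cache when required, and then marks the DD message as sent.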
func (c *Core) reSendDdMsg(ctx context.Context, force bool) error {
|
|
|
|
if !force {
|
2021-10-21 14:04:36 +08:00
|
|
|
flag, err := c.MetaTable.txn.Load(DDMsgSendPrefix)
|
2021-09-07 11:16:37 +08:00
|
|
|
if err != nil || flag == "true" {
|
|
|
|
log.Debug("No un-successful DdMsg")
|
|
|
|
return nil
|
|
|
|
}
|
2021-05-14 21:26:06 +08:00
|
|
|
}
|
|
|
|
|
2021-10-21 14:04:36 +08:00
|
|
|
ddOpStr, err := c.MetaTable.txn.Load(DDOperationPrefix)
|
2021-05-14 21:26:06 +08:00
|
|
|
if err != nil {
|
|
|
|
log.Debug("DdOperation key does not exist")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
var ddOp DdOperation
|
|
|
|
if err = json.Unmarshal([]byte(ddOpStr), &ddOp); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-09-07 11:16:37 +08:00
|
|
|
var invalidateCache bool
|
|
|
|
var ts typeutil.Timestamp
|
|
|
|
var dbName, collName string
|
|
|
|
|
2021-05-14 21:26:06 +08:00
|
|
|
switch ddOp.Type {
|
2021-09-27 18:10:00 +08:00
|
|
|
// TODO remove create collection resend
|
|
|
|
// since create collection needs a start position to succeed
|
2021-05-14 21:26:06 +08:00
|
|
|
case CreateCollectionDDType:
|
2021-07-06 09:16:03 +08:00
|
|
|
var ddReq = internalpb.CreateCollectionRequest{}
|
2021-09-23 10:37:54 +08:00
|
|
|
if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-07-06 09:16:03 +08:00
|
|
|
collInfo, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0)
|
2021-06-11 16:39:29 +08:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-09-27 18:10:00 +08:00
|
|
|
if _, err = c.SendDdCreateCollectionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
invalidateCache = false
|
2021-05-14 21:26:06 +08:00
|
|
|
case DropCollectionDDType:
|
|
|
|
var ddReq = internalpb.DropCollectionRequest{}
|
2021-09-23 10:37:54 +08:00
|
|
|
if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
ts = ddReq.Base.Timestamp
|
|
|
|
dbName, collName = ddReq.DbName, ddReq.CollectionName
|
2021-06-11 16:39:29 +08:00
|
|
|
collInfo, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err = c.SendDdDropCollectionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
invalidateCache = true
|
2021-05-14 21:26:06 +08:00
|
|
|
case CreatePartitionDDType:
|
|
|
|
var ddReq = internalpb.CreatePartitionRequest{}
|
2021-09-23 10:37:54 +08:00
|
|
|
if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
ts = ddReq.Base.Timestamp
|
|
|
|
dbName, collName = ddReq.DbName, ddReq.CollectionName
|
2021-06-11 16:39:29 +08:00
|
|
|
collInfo, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
if _, err = c.MetaTable.GetPartitionByName(collInfo.ID, ddReq.PartitionName, 0); err == nil {
|
|
|
|
return fmt.Errorf("partition %s already created", ddReq.PartitionName)
|
|
|
|
}
|
2021-06-11 16:39:29 +08:00
|
|
|
if err = c.SendDdCreatePartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
invalidateCache = true
|
2021-05-14 21:26:06 +08:00
|
|
|
case DropPartitionDDType:
|
|
|
|
var ddReq = internalpb.DropPartitionRequest{}
|
2021-09-23 10:37:54 +08:00
|
|
|
if err = proto.Unmarshal(ddOp.Body, &ddReq); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
ts = ddReq.Base.Timestamp
|
|
|
|
dbName, collName = ddReq.DbName, ddReq.CollectionName
|
2021-06-11 16:39:29 +08:00
|
|
|
collInfo, err := c.MetaTable.GetCollectionByName(ddReq.CollectionName, 0)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
if _, err = c.MetaTable.GetPartitionByName(collInfo.ID, ddReq.PartitionName, 0); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2021-06-11 16:39:29 +08:00
|
|
|
if err = c.SendDdDropPartitionReq(ctx, &ddReq, collInfo.PhysicalChannelNames); err != nil {
|
2021-05-14 21:26:06 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
invalidateCache = true
|
|
|
|
default:
|
2021-11-16 14:25:17 +08:00
|
|
|
return fmt.Errorf("invalid DdOperation %s", ddOp.Type)
|
2021-09-07 11:16:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if invalidateCache {
|
2021-05-26 20:14:30 +08:00
|
|
|
req := proxypb.InvalidateCollMetaCacheRequest{
|
|
|
|
Base: &commonpb.MsgBase{
|
|
|
|
MsgType: 0, //TODO, msg type
|
|
|
|
MsgID: 0, //TODO, msg id
|
2021-09-07 11:16:37 +08:00
|
|
|
Timestamp: ts,
|
2021-05-26 20:14:30 +08:00
|
|
|
SourceID: c.session.ServerID,
|
|
|
|
},
|
2021-09-07 11:16:37 +08:00
|
|
|
DbName: dbName,
|
|
|
|
CollectionName: collName,
|
2021-05-26 20:14:30 +08:00
|
|
|
}
|
|
|
|
c.proxyClientManager.InvalidateCollectionMetaCache(c.ctx, &req)
|
2021-05-14 21:26:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Mark the DdOperation in etcd as already sent
|
|
|
|
return c.setDdMsgSendFlag(true)
|
|
|
|
}
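
// exampleDdOpRoundTrip is an illustrative sketch (not part of the original source)
// of the serialization that the replay switch above relies on: a CreateCollectionDDType
// operation body is a proto-marshaled internalpb.CreateCollectionRequest that is
// decoded again with proto.Unmarshal. The helper name is hypothetical.
func exampleDdOpRoundTrip(req *internalpb.CreateCollectionRequest) (*internalpb.CreateCollectionRequest, error) {
	body, err := proto.Marshal(req)
	if err != nil {
		return nil, err
	}
	decoded := &internalpb.CreateCollectionRequest{}
	if err := proto.Unmarshal(body, decoded); err != nil {
		return nil, err
	}
	return decoded, nil
}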
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// Start starts the rootcoord service
|
2021-01-19 14:44:03 +08:00
|
|
|
func (c *Core) Start() error {
|
|
|
|
if err := c.checkInit(); err != nil {
|
2021-06-17 16:47:57 +08:00
|
|
|
log.Debug("RootCoord Start checkInit failed", zap.Error(err))
|
2021-01-19 14:44:03 +08:00
|
|
|
return err
|
|
|
|
}
|
2021-04-08 17:31:39 +08:00
|
|
|
|
2021-06-17 16:47:57 +08:00
|
|
|
log.Debug(typeutil.RootCoordRole, zap.Int64("node id", c.session.ServerID))
|
|
|
|
log.Debug(typeutil.RootCoordRole, zap.String("time tick channel name", Params.TimeTickChannel))
|
2021-04-08 17:31:39 +08:00
|
|
|
|
2021-01-19 14:44:03 +08:00
|
|
|
c.startOnce.Do(func() {
|
2021-06-22 19:08:03 +08:00
|
|
|
if err := c.proxyManager.WatchProxy(); err != nil {
|
2021-10-27 22:00:27 +08:00
|
|
|
log.Fatal("RootCoord Start WatchProxy failed", zap.Error(err))
|
|
|
|
// we cannot just get stuck here; abort the process
|
|
|
|
panic(err)
|
2021-05-26 20:14:30 +08:00
|
|
|
}
|
2021-09-07 11:16:37 +08:00
|
|
|
if err := c.reSendDdMsg(c.ctx, false); err != nil {
|
2021-10-27 22:00:27 +08:00
|
|
|
log.Fatal("RootCoord Start reSendDdMsg failed", zap.Error(err))
|
|
|
|
panic(err)
|
2021-05-14 21:26:06 +08:00
|
|
|
}
|
2021-10-14 16:40:35 +08:00
|
|
|
c.wg.Add(4)
|
2021-01-19 14:44:03 +08:00
|
|
|
go c.startTimeTickLoop()
|
2021-01-27 16:38:18 +08:00
|
|
|
go c.tsLoop()
|
2021-09-17 12:37:50 +08:00
|
|
|
go c.chanTimeTick.StartWatch(&c.wg)
|
2021-07-03 17:54:25 +08:00
|
|
|
go c.checkFlushedSegmentsLoop()
|
2021-10-14 16:40:35 +08:00
|
|
|
go c.session.LivenessCheck(c.ctx, func() {
|
2021-10-27 21:58:33 +08:00
|
|
|
log.Error("Root Coord disconnected from etcd, process will exit", zap.Int64("Server Id", c.session.ServerID))
|
2021-10-30 10:24:38 +08:00
|
|
|
if err := c.Stop(); err != nil {
|
|
|
|
log.Fatal("failed to stop server", zap.Error(err))
|
|
|
|
}
|
2021-11-22 16:23:17 +08:00
|
|
|
// manually send signal to starter goroutine
|
|
|
|
syscall.Kill(syscall.Getpid(), syscall.SIGINT)
|
2021-10-14 16:40:35 +08:00
|
|
|
})
|
2021-09-17 21:52:00 +08:00
|
|
|
Params.CreatedTime = time.Now()
|
|
|
|
Params.UpdatedTime = time.Now()
|
|
|
|
|
2021-03-12 14:22:09 +08:00
|
|
|
c.stateCode.Store(internalpb.StateCode_Healthy)
|
2021-10-27 22:00:27 +08:00
|
|
|
log.Debug(typeutil.RootCoordRole+" start successfully ", zap.String("State Code", internalpb.StateCode_name[int32(internalpb.StateCode_Healthy)]))
|
2021-01-19 14:44:03 +08:00
|
|
|
})
|
2021-09-17 21:52:00 +08:00
|
|
|
|
2021-01-19 14:44:03 +08:00
|
|
|
return nil
|
|
|
|
}
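
// exampleCoreLifecycle is an illustrative sketch (not part of the original source)
// of the Start/Stop lifecycle above. Note that on a liveness-check failure the
// callback registered in Start sends SIGINT to the process, so a real starter
// goroutine would also trap that signal; that part is omitted here so the sketch
// only needs this file's existing imports. The function name is hypothetical.
func exampleCoreLifecycle(ctx context.Context, c *Core) error {
	if err := c.Start(); err != nil {
		return err
	}
	// serve until the surrounding context is cancelled
	<-ctx.Done()
	return c.Stop()
}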
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// Stop stops the rootcoord service
|
2021-01-19 14:44:03 +08:00
|
|
|
func (c *Core) Stop() error {
|
|
|
|
c.cancel()
|
2021-09-17 12:37:50 +08:00
|
|
|
c.wg.Wait()
|
2021-03-12 14:22:09 +08:00
|
|
|
c.stateCode.Store(internalpb.StateCode_Abnormal)
|
2021-11-16 22:31:14 +08:00
|
|
|
// wait at most one second to revoke
|
|
|
|
c.session.Revoke(time.Second)
|
2021-01-19 14:44:03 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetComponentStates gets the states of rootcoord and its subcomponents
|
2021-03-12 14:22:09 +08:00
|
|
|
func (c *Core) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
|
|
|
|
code := c.stateCode.Load().(internalpb.StateCode)
|
|
|
|
log.Debug("GetComponentStates", zap.String("State Code", internalpb.StateCode_name[int32(code)]))
|
2021-01-26 19:24:09 +08:00
|
|
|
|
2021-11-19 13:57:12 +08:00
|
|
|
nodeID := common.NotRegisteredID
|
|
|
|
if c.session != nil && c.session.Registered() {
|
|
|
|
nodeID = c.session.ServerID
|
|
|
|
}
|
|
|
|
|
2021-03-12 14:22:09 +08:00
|
|
|
return &internalpb.ComponentStates{
|
|
|
|
State: &internalpb.ComponentInfo{
|
2021-11-19 13:57:12 +08:00
|
|
|
// NodeID: c.session.ServerID, // will race with Core.Register()
|
|
|
|
NodeID: nodeID,
|
2021-06-17 16:47:57 +08:00
|
|
|
Role: typeutil.RootCoordRole,
|
2021-01-20 11:02:29 +08:00
|
|
|
StateCode: code,
|
|
|
|
ExtraInfo: nil,
|
2021-01-19 14:44:03 +08:00
|
|
|
},
|
2021-01-26 17:47:38 +08:00
|
|
|
Status: &commonpb.Status{
|
2021-03-10 22:06:22 +08:00
|
|
|
ErrorCode: commonpb.ErrorCode_Success,
|
2021-01-26 17:47:38 +08:00
|
|
|
Reason: "",
|
|
|
|
},
|
2021-03-12 14:22:09 +08:00
|
|
|
SubcomponentStates: []*internalpb.ComponentInfo{
|
2021-01-26 17:47:38 +08:00
|
|
|
{
|
2021-11-19 13:57:12 +08:00
|
|
|
NodeID: nodeID,
|
2021-06-17 16:47:57 +08:00
|
|
|
Role: typeutil.RootCoordRole,
|
2021-01-26 17:47:38 +08:00
|
|
|
StateCode: code,
|
|
|
|
ExtraInfo: nil,
|
|
|
|
},
|
|
|
|
},
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
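
// exampleHealthProbe is an illustrative sketch (not part of the original source)
// showing how GetComponentStates can serve as a simple health probe: the
// component is considered healthy when its StateCode is Healthy. The function
// name is hypothetical.
func exampleHealthProbe(ctx context.Context, c *Core) (bool, error) {
	states, err := c.GetComponentStates(ctx)
	if err != nil {
		return false, err
	}
	return states.State.StateCode == internalpb.StateCode_Healthy, nil
}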
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetTimeTickChannel gets the time tick channel name
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
|
|
|
|
return &milvuspb.StringResponse{
|
|
|
|
Status: &commonpb.Status{
|
2021-03-10 22:06:22 +08:00
|
|
|
ErrorCode: commonpb.ErrorCode_Success,
|
2021-02-26 17:44:24 +08:00
|
|
|
Reason: "",
|
|
|
|
},
|
|
|
|
Value: Params.TimeTickChannel,
|
|
|
|
}, nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetStatisticsChannel gets the statistics channel name
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
|
|
|
|
return &milvuspb.StringResponse{
|
|
|
|
Status: &commonpb.Status{
|
2021-03-10 22:06:22 +08:00
|
|
|
ErrorCode: commonpb.ErrorCode_Success,
|
2021-02-26 17:44:24 +08:00
|
|
|
Reason: "",
|
|
|
|
},
|
|
|
|
Value: Params.StatisticsChannel,
|
|
|
|
}, nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// CreateCollection creates a collection
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordCreateCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-01-25 18:33:10 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreateCollection", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &CreateCollectionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("CreateCollection failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "CreateCollection failed: "+err.Error()), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreateCollection success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordCreateCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
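
// exampleCreateCollectionRequest is an illustrative sketch (not part of the original
// source) of how the Schema field of a CreateCollectionRequest is built: a
// schemapb.CollectionSchema is proto-marshaled into bytes and carried in the request.
// The field names, field ids and the "dim" type-param key below are assumptions
// for illustration only.
func exampleCreateCollectionRequest(dbName, collName string) (*milvuspb.CreateCollectionRequest, error) {
	schema := &schemapb.CollectionSchema{
		Name: collName,
		Fields: []*schemapb.FieldSchema{
			{FieldID: 100, Name: "id", IsPrimaryKey: true, DataType: schemapb.DataType_Int64},
			{FieldID: 101, Name: "vec", DataType: schemapb.DataType_FloatVector,
				TypeParams: []*commonpb.KeyValuePair{{Key: "dim", Value: "128"}}},
		},
	}
	marshaled, err := proto.Marshal(schema)
	if err != nil {
		return nil, err
	}
	return &milvuspb.CreateCollectionRequest{
		Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
		DbName:         dbName,
		CollectionName: collName,
		Schema:         marshaled,
	}, nil
}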
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DropCollection drops a collection
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDropCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-01-25 18:33:10 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropCollection", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &DropCollectionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("DropCollection failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "DropCollection failed: "+err.Error()), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropCollection success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDropCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// HasCollection checks whether a collection exists
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordHasCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-01-25 18:33:10 +08:00
|
|
|
return &milvuspb.BoolResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
|
|
|
Value: false,
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("HasCollection", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &HasCollectionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
HasCollection: false,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("HasCollection failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-01-19 14:44:03 +08:00
|
|
|
return &milvuspb.BoolResponse{
|
2021-11-22 16:01:14 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasCollection failed: "+err.Error()),
|
2021-11-19 12:11:12 +08:00
|
|
|
Value: false,
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("HasCollection success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordHasCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-01-19 14:44:03 +08:00
|
|
|
return &milvuspb.BoolResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: succStatus(),
|
|
|
|
Value: t.HasCollection,
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DescribeCollection returns collection info
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDescribeCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-01-25 18:33:10 +08:00
|
|
|
return &milvuspb.DescribeCollectionResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode"+internalpb.StateCode_name[int32(code)]),
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DescribeCollection", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &DescribeCollectionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
Rsp: &milvuspb.DescribeCollectionResponse{},
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("DescribeCollection failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-01-19 14:44:03 +08:00
|
|
|
return &milvuspb.DescribeCollectionResponse{
|
2021-11-22 16:01:14 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeCollection failed: "+err.Error()),
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DescribeCollection success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDescribeCollectionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
t.Rsp.Status = succStatus()
|
2021-01-19 14:44:03 +08:00
|
|
|
return t.Rsp, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ShowCollections lists all collection names
|
2021-03-12 14:22:09 +08:00
|
|
|
func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordShowCollectionsCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-03-12 14:22:09 +08:00
|
|
|
return &milvuspb.ShowCollectionsResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("ShowCollections", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("dbname", in.DbName), zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &ShowCollectionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
2021-11-19 12:11:12 +08:00
|
|
|
Rsp: &milvuspb.ShowCollectionsResponse{},
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("ShowCollections failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("dbname", in.DbName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-03-12 14:22:09 +08:00
|
|
|
return &milvuspb.ShowCollectionsResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowCollections failed: "+err.Error()),
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("ShowCollections success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("dbname", in.DbName), zap.Int("num of collections", len(t.Rsp.CollectionNames)),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordShowCollectionsCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
t.Rsp.Status = succStatus()
|
2021-01-19 14:44:03 +08:00
|
|
|
return t.Rsp, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// CreatePartition creates a partition
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordCreatePartitionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-01-25 18:33:10 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreatePartition", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &CreatePartitionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("CreatePartition failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-11-19 12:11:12 +08:00
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "CreatePartition failed: "+err.Error()), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreatePartition success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordCreatePartitionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DropPartition drops a partition
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDropPartitionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-01-25 18:33:10 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropPartition", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &DropPartitionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("DropPartition failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-11-19 12:11:12 +08:00
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "DropPartition failed: "+err.Error()), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropPartition success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDropPartitionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// HasPartition checks whether a partition exists
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordHasPartitionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-01-25 18:33:10 +08:00
|
|
|
return &milvuspb.BoolResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
|
|
|
Value: false,
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("HasPartition", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &HasPartitionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
HasPartition: false,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("HasPartition failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-01-19 14:44:03 +08:00
|
|
|
return &milvuspb.BoolResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasPartition failed: "+err.Error()),
|
|
|
|
Value: false,
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("HasPartition success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("partition name", in.PartitionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordHasPartitionCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-01-19 14:44:03 +08:00
|
|
|
return &milvuspb.BoolResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: succStatus(),
|
|
|
|
Value: t.HasPartition,
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ShowPartitions lists all partition names
|
2021-03-12 14:22:09 +08:00
|
|
|
func (c *Core) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordShowPartitionsCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-03-12 14:22:09 +08:00
|
|
|
return &milvuspb.ShowPartitionsResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("ShowPartitions", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID))
|
2021-01-19 14:44:03 +08:00
|
|
|
t := &ShowPartitionReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-19 14:44:03 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
2021-11-19 12:11:12 +08:00
|
|
|
Rsp: &milvuspb.ShowPartitionsResponse{},
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("ShowPartitions failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-03-12 14:22:09 +08:00
|
|
|
return &milvuspb.ShowPartitionsResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowPartitions failed: "+err.Error()),
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("ShowPartitions success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.Int("num of partitions", len(t.Rsp.PartitionNames)),
|
|
|
|
zap.Int64("msgID", t.Req.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordShowPartitionsCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
t.Rsp.Status = succStatus()
|
2021-01-19 14:44:03 +08:00
|
|
|
return t.Rsp, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// CreateIndex creates an index
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordCreateIndexCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-01-25 18:33:10 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreateIndex", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-01-21 10:01:29 +08:00
|
|
|
t := &CreateIndexReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-21 10:01:29 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-21 10:01:29 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("CreateIndex failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "CreateIndex failed: "+err.Error()), nil
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreateIndex success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordCreateIndexCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
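
// exampleCreateIndexRequest is an illustrative sketch (not part of the original
// source) of a CreateIndexRequest: index parameters are passed as key-value pairs
// in ExtraParams. The concrete keys and values below ("index_type", "metric_type",
// "params") are assumptions for illustration only.
func exampleCreateIndexRequest(collName, fieldName string) *milvuspb.CreateIndexRequest {
	return &milvuspb.CreateIndexRequest{
		Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateIndex},
		CollectionName: collName,
		FieldName:      fieldName,
		ExtraParams: []*commonpb.KeyValuePair{
			{Key: "index_type", Value: "IVF_FLAT"},
			{Key: "metric_type", Value: "L2"},
			{Key: "params", Value: `{"nlist": 128}`},
		},
	}
}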
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DescribeIndex returns index info
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDescribeIndexCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-01-25 18:33:10 +08:00
|
|
|
return &milvuspb.DescribeIndexResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DescribeIndex", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-01-21 10:01:29 +08:00
|
|
|
t := &DescribeIndexReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-21 10:01:29 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
2021-11-19 12:11:12 +08:00
|
|
|
Rsp: &milvuspb.DescribeIndexResponse{},
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-21 10:01:29 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("DescribeIndex failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-01-21 10:01:29 +08:00
|
|
|
return &milvuspb.DescribeIndexResponse{
|
2021-11-22 16:01:14 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeIndex failed: "+err.Error()),
|
2021-01-21 10:01:29 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-02-24 16:25:40 +08:00
|
|
|
idxNames := make([]string, 0, len(t.Rsp.IndexDescriptions))
|
|
|
|
for _, i := range t.Rsp.IndexDescriptions {
|
|
|
|
idxNames = append(idxNames, i.IndexName)
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DescribeIndex success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.Strings("index names", idxNames), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDescribeIndexCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-03-05 20:41:34 +08:00
|
|
|
if len(t.Rsp.IndexDescriptions) == 0 {
|
2021-11-22 16:01:14 +08:00
|
|
|
t.Rsp.Status = failStatus(commonpb.ErrorCode_IndexNotExist, "index not exist")
|
2021-03-05 20:41:34 +08:00
|
|
|
} else {
|
2021-11-19 12:11:12 +08:00
|
|
|
t.Rsp.Status = succStatus()
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
return t.Rsp, nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
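
// exampleDescribeIndexCheck is an illustrative sketch (not part of the original
// source) showing how a caller might distinguish "no index" from a real failure,
// based on the IndexNotExist error code set above when no descriptions are found.
// The helper name is hypothetical.
func exampleDescribeIndexCheck(ctx context.Context, c *Core, req *milvuspb.DescribeIndexRequest) ([]string, error) {
	rsp, err := c.DescribeIndex(ctx, req)
	if err != nil {
		return nil, err
	}
	switch rsp.Status.ErrorCode {
	case commonpb.ErrorCode_Success:
		names := make([]string, 0, len(rsp.IndexDescriptions))
		for _, d := range rsp.IndexDescriptions {
			names = append(names, d.IndexName)
		}
		return names, nil
	case commonpb.ErrorCode_IndexNotExist:
		return nil, nil // no index on the field, not a failure
	default:
		return nil, fmt.Errorf("DescribeIndex failed: %s", rsp.Status.Reason)
	}
}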
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DropIndex drops an index
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) DropIndex(ctx context.Context, in *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDropIndexCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropIndex", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.String("index name", in.IndexName), zap.Int64("msgID", in.Base.MsgID))
|
2021-02-20 15:38:44 +08:00
|
|
|
t := &DropIndexReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-02-20 15:38:44 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-02-20 15:38:44 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("DropIndex failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.String("index name", in.IndexName), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "DropIndex failed: "+err.Error()), nil
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropIndex success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection name", in.CollectionName), zap.String("field name", in.FieldName),
|
|
|
|
zap.String("index name", in.IndexName), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDropIndexCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DescribeSegment returns segment info
|
2021-02-26 17:44:24 +08:00
|
|
|
func (c *Core) DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDescribeSegmentCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-01-25 18:33:10 +08:00
|
|
|
return &milvuspb.DescribeSegmentResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DescribeSegment", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.CollectionID), zap.Int64("segment id", in.SegmentID),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-01-21 10:01:29 +08:00
|
|
|
t := &DescribeSegmentReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-21 10:01:29 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
2021-11-19 12:11:12 +08:00
|
|
|
Rsp: &milvuspb.DescribeSegmentResponse{},
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-21 10:01:29 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("DescribeSegment failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.CollectionID), zap.Int64("segment id", in.SegmentID),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-01-21 10:01:29 +08:00
|
|
|
return &milvuspb.DescribeSegmentResponse{
|
2021-11-22 16:01:14 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "DescribeSegment failed: "+err.Error()),
|
2021-01-21 10:01:29 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DescribeSegment success", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.CollectionID), zap.Int64("segment id", in.SegmentID),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordDescribeSegmentCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
t.Rsp.Status = succStatus()
|
2021-01-21 10:01:29 +08:00
|
|
|
return t.Rsp, nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ShowSegments lists all segments
|
2021-03-12 14:22:09 +08:00
|
|
|
func (c *Core) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error) {
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordShowSegmentsCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsTotal).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-03-12 14:22:09 +08:00
|
|
|
return &milvuspb.ShowSegmentsResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-01-25 18:33:10 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("ShowSegments", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.CollectionID), zap.Int64("partition id", in.PartitionID),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-01-21 10:01:29 +08:00
|
|
|
t := &ShowSegmentReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
2021-03-13 14:42:53 +08:00
|
|
|
ctx: ctx,
|
2021-01-21 10:01:29 +08:00
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
2021-11-19 12:11:12 +08:00
|
|
|
Rsp: &milvuspb.ShowSegmentsResponse{},
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
2021-06-26 09:22:11 +08:00
|
|
|
err := executeTask(t)
|
2021-01-21 10:01:29 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("ShowSegments failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.CollectionID), zap.Int64("partition id", in.PartitionID),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-03-12 14:22:09 +08:00
|
|
|
return &milvuspb.ShowSegmentsResponse{
|
2021-11-22 16:01:14 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowSegments failed: "+err.Error()),
|
2021-01-21 10:01:29 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("ShowSegments success", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.CollectionID), zap.Int64("partition id", in.PartitionID),
|
|
|
|
zap.Int64s("segments ids", t.Rsp.SegmentIDs),
|
2021-11-19 12:11:12 +08:00
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
|
|
|
|
2021-06-22 19:08:03 +08:00
|
|
|
metrics.RootCoordShowSegmentsCounter.WithLabelValues(metricProxy(in.Base.SourceID), MetricRequestsSuccess).Inc()
|
2021-11-19 12:11:12 +08:00
|
|
|
t.Rsp.Status = succStatus()
|
2021-01-21 10:01:29 +08:00
|
|
|
return t.Rsp, nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AllocTimestamp allocates timestamps
|
2021-06-22 16:14:09 +08:00
|
|
|
func (c *Core) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestampRequest) (*rootcoordpb.AllocTimestampResponse, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-06-22 16:14:09 +08:00
|
|
|
return &rootcoordpb.AllocTimestampResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-05-26 20:14:30 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-05-20 14:14:14 +08:00
|
|
|
ts, err := c.TSOAllocator(in.Count)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("AllocTimestamp failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-06-22 16:14:09 +08:00
|
|
|
return &rootcoordpb.AllocTimestampResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "AllocTimestamp failed: "+err.Error()),
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-07-14 17:11:54 +08:00
|
|
|
|
|
|
|
// return the first available timestamp
|
|
|
|
ts = ts - uint64(in.Count) + 1
|
2021-06-22 16:14:09 +08:00
|
|
|
return &rootcoordpb.AllocTimestampResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: succStatus(),
|
2021-01-19 14:44:03 +08:00
|
|
|
Timestamp: ts,
|
|
|
|
Count: in.Count,
|
|
|
|
}, nil
|
|
|
|
}
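
// exampleAllocTimestampUsage is an illustrative sketch (not part of the original
// source) showing how AllocTimestamp's response is interpreted: as noted above,
// the returned Timestamp is the first available one, so the allocated range is
// [Timestamp, Timestamp+Count-1]. The helper name is hypothetical.
func exampleAllocTimestampUsage(ctx context.Context, c *Core, count uint32) (uint64, uint64, error) {
	resp, err := c.AllocTimestamp(ctx, &rootcoordpb.AllocTimestampRequest{Base: &commonpb.MsgBase{}, Count: count})
	if err != nil {
		return 0, 0, err
	}
	if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
		return 0, 0, fmt.Errorf("AllocTimestamp failed: %s", resp.Status.Reason)
	}
	return resp.Timestamp, resp.Timestamp + uint64(resp.Count) - 1, nil
}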
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AllocID allocates IDs
|
2021-06-22 16:14:09 +08:00
|
|
|
func (c *Core) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*rootcoordpb.AllocIDResponse, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-06-22 16:14:09 +08:00
|
|
|
return &rootcoordpb.AllocIDResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-05-26 20:14:30 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-05-20 14:14:14 +08:00
|
|
|
start, _, err := c.IDAllocator(in.Count)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("AllocID failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-06-22 16:14:09 +08:00
|
|
|
return &rootcoordpb.AllocIDResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "AllocID failed: "+err.Error()),
|
|
|
|
Count: in.Count,
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("AllocID success", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("id start", start), zap.Uint32("count", in.Count), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-06-22 16:14:09 +08:00
|
|
|
return &rootcoordpb.AllocIDResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: succStatus(),
|
|
|
|
ID: start,
|
|
|
|
Count: in.Count,
|
2021-01-19 14:44:03 +08:00
|
|
|
}, nil
|
|
|
|
}
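
// exampleAllocIDUsage is an illustrative sketch (not part of the original source)
// showing how AllocID's response is interpreted: ID is the first of Count
// consecutive ids, so the allocated range is [ID, ID+Count-1]. The helper name
// is hypothetical.
func exampleAllocIDUsage(ctx context.Context, c *Core, count uint32) (int64, int64, error) {
	resp, err := c.AllocID(ctx, &rootcoordpb.AllocIDRequest{Base: &commonpb.MsgBase{}, Count: count})
	if err != nil {
		return 0, 0, err
	}
	if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
		return 0, 0, fmt.Errorf("AllocID failed: %s", resp.Status.Reason)
	}
	return resp.ID, resp.ID + int64(resp.Count) - 1, nil
}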
|
2021-05-21 16:08:12 +08:00
|
|
|
|
|
|
|
// UpdateChannelTimeTick is used to handle ChannelTimeTickMsg
|
|
|
|
func (c *Core) UpdateChannelTimeTick(ctx context.Context, in *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-05-21 16:08:12 +08:00
|
|
|
}
|
|
|
|
if in.Base.MsgType != commonpb.MsgType_TimeTick {
|
2021-11-19 12:11:12 +08:00
|
|
|
msgTypeName := commonpb.MsgType_name[int32(in.Base.GetMsgType())]
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "invalid message type "+msgTypeName), nil
|
2021-05-21 16:08:12 +08:00
|
|
|
}
|
2021-08-18 14:36:10 +08:00
|
|
|
err := c.chanTimeTick.UpdateTimeTick(in, "gRPC")
|
2021-05-21 16:08:12 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("UpdateTimeTick failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-11-19 12:11:12 +08:00
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "UpdateTimeTick failed: "+err.Error()), nil
|
2021-05-21 16:08:12 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-05-21 16:08:12 +08:00
|
|
|
}
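
// exampleChannelTimeTickMsg is an illustrative sketch (not part of the original
// source) of the message UpdateChannelTimeTick expects: Base.MsgType must be
// TimeTick, and each channel name is paired with its current timestamp. The
// channel name below and the helper name are assumptions for illustration.
func exampleChannelTimeTickMsg(ts uint64) *internalpb.ChannelTimeTickMsg {
	return &internalpb.ChannelTimeTickMsg{
		Base:             &commonpb.MsgBase{MsgType: commonpb.MsgType_TimeTick},
		ChannelNames:     []string{"by-dev-rootcoord-dml_0"},
		Timestamps:       []uint64{ts},
		DefaultTimestamp: ts,
	}
}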
|
2021-06-17 17:45:56 +08:00
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ReleaseDQLMessageStream releases the DQL message stream
|
2021-06-17 17:45:56 +08:00
|
|
|
func (c *Core) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-06-17 17:45:56 +08:00
|
|
|
}
|
|
|
|
return c.proxyClientManager.ReleaseDQLMessageStream(ctx, in)
|
|
|
|
}
|
2021-07-01 14:58:17 +08:00
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// SegmentFlushCompleted handles the notification that a segment flush has completed
|
2021-07-02 11:16:20 +08:00
|
|
|
func (c *Core) SegmentFlushCompleted(ctx context.Context, in *datapb.SegmentFlushCompletedMsg) (*commonpb.Status, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
|
|
|
if in.Base.MsgType != commonpb.MsgType_SegmentFlushDone {
|
2021-11-22 16:01:14 +08:00
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "invalid msg type "+commonpb.MsgType_name[int32(in.Base.MsgType)]), nil
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
2021-07-02 11:16:20 +08:00
|
|
|
segID := in.Segment.GetID()
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("SegmentFlushCompleted", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.Segment.CollectionID), zap.Int64("partition id", in.Segment.PartitionID),
|
|
|
|
zap.Int64("segment id", segID), zap.Int64("msgID", in.Base.MsgID))
|
2021-07-01 14:58:17 +08:00
|
|
|
|
2021-07-03 14:36:18 +08:00
|
|
|
coll, err := c.MetaTable.GetCollectionByID(in.Segment.CollectionID, 0)
|
2021-07-01 14:58:17 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("GetCollectionByID failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-11-19 12:11:12 +08:00
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "GetCollectionByID failed: "+err.Error()), nil
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(coll.FieldIndexes) == 0 {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("no index params on collection", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection_name", coll.Schema.Name), zap.Int64("msgID", in.Base.MsgID))
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, f := range coll.FieldIndexes {
|
|
|
|
fieldSch, err := GetFieldSchemaByID(coll, f.FiledID)
|
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Warn("field schema not found", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-07-01 14:58:17 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
idxInfo, err := c.MetaTable.GetIndexByID(f.IndexID)
|
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Warn("index not found", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID),
|
|
|
|
zap.Int64("index id", f.IndexID), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-07-01 14:58:17 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
info := etcdpb.SegmentIndexInfo{
|
2021-07-05 10:08:02 +08:00
|
|
|
CollectionID: in.Segment.CollectionID,
|
|
|
|
PartitionID: in.Segment.PartitionID,
|
|
|
|
SegmentID: segID,
|
|
|
|
FieldID: fieldSch.FieldID,
|
|
|
|
IndexID: idxInfo.IndexID,
|
|
|
|
EnableIndex: false,
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
|
|
|
info.BuildID, err = c.BuildIndex(ctx, segID, fieldSch, idxInfo, true)
|
|
|
|
if err == nil && info.BuildID != 0 {
|
|
|
|
info.EnableIndex = true
|
|
|
|
} else {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("BuildIndex failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID),
|
|
|
|
zap.Int64("index id", f.IndexID), zap.Int64("build id", info.BuildID),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-07-03 14:36:18 +08:00
|
|
|
continue
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
2021-10-21 14:04:36 +08:00
|
|
|
err = c.MetaTable.AddIndex(&info)
|
2021-07-01 14:58:17 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("AddIndex failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("collection_name", coll.Schema.Name), zap.Int64("field id", f.FiledID),
|
|
|
|
zap.Int64("index id", f.IndexID), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
|
|
|
continue
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("SegmentFlushCompleted success", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("collection id", in.Segment.CollectionID), zap.Int64("partition id", in.Segment.PartitionID),
|
|
|
|
zap.Int64("segment id", segID), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
return succStatus(), nil
|
2021-07-01 14:58:17 +08:00
|
|
|
}
|
2021-08-31 11:45:59 +08:00
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetMetrics gets metrics
|
2021-11-22 16:01:14 +08:00
|
|
|
func (c *Core) GetMetrics(ctx context.Context, in *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
|
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
2021-08-31 11:45:59 +08:00
|
|
|
return &milvuspb.GetMetricsResponse{
|
2021-11-22 16:01:14 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]),
|
2021-08-31 11:45:59 +08:00
|
|
|
Response: "",
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
metricType, err := metricsinfo.ParseMetricType(in.Request)
|
2021-08-31 11:45:59 +08:00
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("ParseMetricType failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("node_id", c.session.ServerID), zap.String("req", in.Request), zap.Error(err))
|
2021-08-31 11:45:59 +08:00
|
|
|
return &milvuspb.GetMetricsResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ParseMetricType failed: "+err.Error()),
|
2021-08-31 11:45:59 +08:00
|
|
|
Response: "",
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("GetMetrics success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("metric_type", metricType), zap.Int64("msgID", in.Base.MsgID))
|
2021-08-31 11:45:59 +08:00
|
|
|
|
|
|
|
if metricType == metricsinfo.SystemInfoMetrics {
|
2021-09-03 17:15:26 +08:00
|
|
|
ret, err := c.metricsCacheManager.GetSystemInfoMetrics()
|
|
|
|
if err == nil && ret != nil {
|
|
|
|
return ret, nil
|
|
|
|
}
|
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Warn("GetSystemInfoMetrics from cache failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-08-31 11:45:59 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
systemInfoMetrics, err := c.getSystemInfoMetrics(ctx, in)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("GetSystemInfoMetrics failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("metric_type", metricType), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
|
|
|
return nil, err
|
|
|
|
}
|
2021-08-31 11:45:59 +08:00
|
|
|
|
2021-09-03 17:15:26 +08:00
|
|
|
c.metricsCacheManager.UpdateSystemInfoMetrics(systemInfoMetrics)
|
2021-08-31 11:45:59 +08:00
|
|
|
return systemInfoMetrics, err
|
|
|
|
}
|
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("GetMetrics failed, metric type not implemented", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("metric_type", metricType), zap.Int64("msgID", in.Base.MsgID))
|
2021-08-31 11:45:59 +08:00
|
|
|
|
|
|
|
return &milvuspb.GetMetricsResponse{
|
2021-11-19 12:11:12 +08:00
|
|
|
Status: failStatus(commonpb.ErrorCode_UnexpectedError, metricsinfo.MsgUnimplementedMetric),
|
2021-08-31 11:45:59 +08:00
|
|
|
Response: "",
|
|
|
|
}, nil
|
|
|
|
}
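
// exampleSystemInfoMetricsRequest is an illustrative sketch (not part of the
// original source) of a GetMetrics request for system-info metrics: the Request
// field carries a small JSON document whose metric type is parsed by
// metricsinfo.ParseMetricType. The "metric_type" JSON key and the helper name
// are assumptions for illustration.
func exampleSystemInfoMetricsRequest() *milvuspb.GetMetricsRequest {
	return &milvuspb.GetMetricsRequest{
		Base:    &commonpb.MsgBase{},
		Request: fmt.Sprintf(`{"metric_type": %q}`, metricsinfo.SystemInfoMetrics),
	}
}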
|
2021-09-18 11:13:51 +08:00
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// CreateAlias creates a collection alias
|
2021-09-18 11:13:51 +08:00
|
|
|
func (c *Core) CreateAlias(ctx context.Context, in *milvuspb.CreateAliasRequest) (*commonpb.Status, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreateAlias", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-09-18 11:13:51 +08:00
|
|
|
t := &CreateAliasReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
|
|
|
ctx: ctx,
|
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
|
|
|
err := executeTask(t)
|
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("CreateAlias failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "CreateAlias failed: "+err.Error()), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("CreateAlias success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
|
|
|
return succStatus(), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DropAlias drops a collection alias
|
2021-09-18 11:13:51 +08:00
|
|
|
func (c *Core) DropAlias(ctx context.Context, in *milvuspb.DropAliasRequest) (*commonpb.Status, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropAlias", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.Int64("msgID", in.Base.MsgID))
|
2021-09-18 11:13:51 +08:00
|
|
|
t := &DropAliasReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
|
|
|
ctx: ctx,
|
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
|
|
|
err := executeTask(t)
|
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("DropAlias failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-11-19 12:11:12 +08:00
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "DropAlias failed: "+err.Error()), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("DropAlias success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
|
|
|
return succStatus(), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AlterAlias alters a collection alias
|
2021-09-18 11:13:51 +08:00
|
|
|
func (c *Core) AlterAlias(ctx context.Context, in *milvuspb.AlterAliasRequest) (*commonpb.Status, error) {
|
2021-11-19 12:11:12 +08:00
|
|
|
if code, ok := c.checkHealthy(); !ok {
|
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+internalpb.StateCode_name[int32(code)]), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
2021-11-19 12:11:12 +08:00
|
|
|
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("AlterAlias", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-09-18 11:13:51 +08:00
|
|
|
t := &AlterAliasReqTask{
|
|
|
|
baseReqTask: baseReqTask{
|
|
|
|
ctx: ctx,
|
|
|
|
core: c,
|
|
|
|
},
|
|
|
|
Req: in,
|
|
|
|
}
|
|
|
|
err := executeTask(t)
|
|
|
|
if err != nil {
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Error("AlterAlias failed", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID), zap.Error(err))
|
2021-11-19 12:11:12 +08:00
|
|
|
return failStatus(commonpb.ErrorCode_UnexpectedError, "AlterAlias failed: "+err.Error()), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|
2021-11-22 16:01:14 +08:00
|
|
|
log.Debug("AlterAlias success", zap.String("role", Params.RoleName),
|
|
|
|
zap.String("alias", in.Alias), zap.String("collection name", in.CollectionName),
|
|
|
|
zap.Int64("msgID", in.Base.MsgID))
|
2021-11-19 12:11:12 +08:00
|
|
|
|
|
|
|
return succStatus(), nil
|
2021-09-18 11:13:51 +08:00
|
|
|
}
|