2021-04-19 11:12:56 +08:00
|
|
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
|
|
|
// with the License. You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software distributed under the License
|
|
|
|
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
|
|
|
// or implied. See the License for the specific language governing permissions and limitations under the License.
|
|
|
|
|
2021-06-18 21:30:08 +08:00
|
|
|
package rootcoord
|
2021-01-19 14:44:03 +08:00
|
|
|
|
|
|
|
import (
|
2021-03-05 10:15:27 +08:00
|
|
|
"fmt"
|
2021-01-20 09:36:50 +08:00
|
|
|
"path"
|
2021-01-19 14:44:03 +08:00
|
|
|
"strconv"
|
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/golang/protobuf/proto"
|
2021-02-27 10:11:52 +08:00
|
|
|
"go.uber.org/zap"
|
|
|
|
|
2021-04-22 14:45:57 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/kv"
|
|
|
|
"github.com/milvus-io/milvus/internal/log"
|
|
|
|
"github.com/milvus-io/milvus/internal/proto/commonpb"
|
|
|
|
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
|
|
|
|
"github.com/milvus-io/milvus/internal/proto/schemapb"
|
2021-05-14 21:26:06 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/util/typeutil"
|
2021-01-19 14:44:03 +08:00
|
|
|
)
|
|
|
|
|
2021-01-20 09:36:50 +08:00
|
|
|
const (
	// ComponentPrefix is the etcd key prefix shared by all rootcoord metadata.
	ComponentPrefix = "root-coord"

	// TenantMetaPrefix prefix for tenant meta
	TenantMetaPrefix = ComponentPrefix + "/tenant"

	// ProxyMetaPrefix prefix for proxy meta
	ProxyMetaPrefix = ComponentPrefix + "/proxy"

	// CollectionMetaPrefix prefix for collection meta
	CollectionMetaPrefix = ComponentPrefix + "/collection"

	// SegmentIndexMetaPrefix prefix for segment index meta
	SegmentIndexMetaPrefix = ComponentPrefix + "/segment-index"

	// IndexMetaPrefix prefix for index meta
	IndexMetaPrefix = ComponentPrefix + "/index"

	// CollectionAliasMetaPrefix prefix for collection alias meta
	CollectionAliasMetaPrefix = ComponentPrefix + "/collection-alias"

	// TimestampPrefix prefix for timestamp
	TimestampPrefix = ComponentPrefix + "/timestamp"

	// DDOperationPrefix prefix for DD (data-definition) operation records
	DDOperationPrefix = ComponentPrefix + "/dd-operation"

	// DDMsgSendPrefix prefix of the flag indicating whether a DD msg has been sent
	DDMsgSendPrefix = ComponentPrefix + "/dd-msg-send"

	// CreateCollectionDDType name of DD type for create collection
	CreateCollectionDDType = "CreateCollection"

	// DropCollectionDDType name of DD type for drop collection
	DropCollectionDDType = "DropCollection"

	// CreatePartitionDDType name of DD type for create partition
	CreatePartitionDDType = "CreatePartition"

	// DropPartitionDDType name of DD type for drop partition
	DropPartitionDDType = "DropPartition"

	// CreateAliasDDType name of DD type for create collection alias
	CreateAliasDDType = "CreateAlias"

	// DropAliasDDType name of DD type for drop collection alias
	DropAliasDDType = "DropAlias"

	// AlterAliasDDType name of DD type for alter collection alias
	AlterAliasDDType = "AlterAlias"
)
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// MetaTable store all rootcoord meta info.
//
// The maps below are in-memory caches of state persisted through `client`;
// they are rebuilt from the kv store by reloadFromKV.
type MetaTable struct {
	client          kv.SnapShotKV                                                   // client of a reliable kv service, i.e. etcd client
	tenantID2Meta   map[typeutil.UniqueID]pb.TenantMeta                             // tenant id to tenant meta
	proxyID2Meta    map[typeutil.UniqueID]pb.ProxyMeta                              // proxy id to proxy meta
	collID2Meta     map[typeutil.UniqueID]pb.CollectionInfo                         // collection_id -> meta
	collName2ID     map[string]typeutil.UniqueID                                    // collection name to collection id
	collAlias2ID    map[string]typeutil.UniqueID                                    // collection alias to collection id
	partID2SegID    map[typeutil.UniqueID]map[typeutil.UniqueID]bool                // partition_id -> segment_id -> bool
	segID2IndexMeta map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo // collection_id/index_id/partition_id/segment_id -> meta
	indexID2Meta    map[typeutil.UniqueID]pb.IndexInfo                              // collection_id/index_id -> meta

	tenantLock sync.RWMutex // guards tenantID2Meta
	proxyLock  sync.RWMutex // guards proxyID2Meta
	ddLock     sync.RWMutex // guards all collection/partition/segment/index state
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// NewMetaTable create meta table for rootcoord, which stores all in-memory information
|
|
|
|
// for collection, partion, segment, index etc.
|
|
|
|
func NewMetaTable(kv kv.SnapShotKV) (*MetaTable, error) {
|
|
|
|
mt := &MetaTable{
|
2021-01-19 14:44:03 +08:00
|
|
|
client: kv,
|
|
|
|
tenantLock: sync.RWMutex{},
|
|
|
|
proxyLock: sync.RWMutex{},
|
|
|
|
ddLock: sync.RWMutex{},
|
|
|
|
}
|
|
|
|
err := mt.reloadFromKV()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return mt, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// reloadFromKV rebuilds every in-memory map from the kv store. It is called
// once from NewMetaTable; all previous in-memory state is discarded. Each
// metadata family is loaded with LoadWithPrefix at ts=0 and unmarshaled from
// its protobuf encoding; the first error aborts the whole reload.
func (mt *MetaTable) reloadFromKV() error {
	// Reset all caches so a reload never mixes old and new state.
	mt.tenantID2Meta = make(map[typeutil.UniqueID]pb.TenantMeta)
	mt.proxyID2Meta = make(map[typeutil.UniqueID]pb.ProxyMeta)
	mt.collID2Meta = make(map[typeutil.UniqueID]pb.CollectionInfo)
	mt.collName2ID = make(map[string]typeutil.UniqueID)
	mt.collAlias2ID = make(map[string]typeutil.UniqueID)
	mt.partID2SegID = make(map[typeutil.UniqueID]map[typeutil.UniqueID]bool)
	mt.segID2IndexMeta = make(map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo)
	mt.indexID2Meta = make(map[typeutil.UniqueID]pb.IndexInfo)

	// Tenant metadata.
	_, values, err := mt.client.LoadWithPrefix(TenantMetaPrefix, 0)
	if err != nil {
		return err
	}

	for _, value := range values {
		tenantMeta := pb.TenantMeta{}
		err := proto.Unmarshal([]byte(value), &tenantMeta)
		if err != nil {
			return fmt.Errorf("RootCoord Unmarshal pb.TenantMeta err:%w", err)
		}
		mt.tenantID2Meta[tenantMeta.ID] = tenantMeta
	}

	// Proxy metadata.
	_, values, err = mt.client.LoadWithPrefix(ProxyMetaPrefix, 0)
	if err != nil {
		return err
	}

	for _, value := range values {
		proxyMeta := pb.ProxyMeta{}
		err = proto.Unmarshal([]byte(value), &proxyMeta)
		if err != nil {
			return fmt.Errorf("RootCoord Unmarshal pb.ProxyMeta err:%w", err)
		}
		mt.proxyID2Meta[proxyMeta.ID] = proxyMeta
	}

	// Collection metadata: populate both the id->meta and name->id maps.
	_, values, err = mt.client.LoadWithPrefix(CollectionMetaPrefix, 0)
	if err != nil {
		return err
	}

	for _, value := range values {
		collInfo := pb.CollectionInfo{}
		err = proto.Unmarshal([]byte(value), &collInfo)
		if err != nil {
			return fmt.Errorf("RootCoord Unmarshal pb.CollectionInfo err:%w", err)
		}
		mt.collID2Meta[collInfo.ID] = collInfo
		mt.collName2ID[collInfo.Schema.Name] = collInfo.ID
	}

	// Segment index metadata: feeds two maps (partID2SegID, segID2IndexMeta).
	_, values, err = mt.client.LoadWithPrefix(SegmentIndexMetaPrefix, 0)
	if err != nil {
		return err
	}
	for _, value := range values {
		segmentIndexInfo := pb.SegmentIndexInfo{}
		err = proto.Unmarshal([]byte(value), &segmentIndexInfo)
		if err != nil {
			return fmt.Errorf("RootCoord Unmarshal pb.SegmentIndexInfo err:%w", err)
		}

		// update partID2SegID
		segIDMap, ok := mt.partID2SegID[segmentIndexInfo.PartitionID]
		if ok {
			segIDMap[segmentIndexInfo.SegmentID] = true
		} else {
			idMap := make(map[typeutil.UniqueID]bool)
			idMap[segmentIndexInfo.SegmentID] = true
			mt.partID2SegID[segmentIndexInfo.PartitionID] = idMap
		}

		// update segID2IndexMeta
		idx, ok := mt.segID2IndexMeta[segmentIndexInfo.SegmentID]
		if ok {
			idx[segmentIndexInfo.IndexID] = segmentIndexInfo
		} else {
			meta := make(map[typeutil.UniqueID]pb.SegmentIndexInfo)
			meta[segmentIndexInfo.IndexID] = segmentIndexInfo
			mt.segID2IndexMeta[segmentIndexInfo.SegmentID] = meta
		}
	}

	// Index metadata.
	_, values, err = mt.client.LoadWithPrefix(IndexMetaPrefix, 0)
	if err != nil {
		return err
	}
	for _, value := range values {
		meta := pb.IndexInfo{}
		err = proto.Unmarshal([]byte(value), &meta)
		if err != nil {
			return fmt.Errorf("RootCoord Unmarshal pb.IndexInfo err:%w", err)
		}
		mt.indexID2Meta[meta.IndexID] = meta
	}

	// Collection aliases are stored as pb.CollectionInfo records whose
	// Schema.Name carries the alias and ID the target collection.
	_, values, err = mt.client.LoadWithPrefix(CollectionAliasMetaPrefix, 0)
	if err != nil {
		return err
	}
	for _, value := range values {
		aliasInfo := pb.CollectionInfo{}
		err = proto.Unmarshal([]byte(value), &aliasInfo)
		if err != nil {
			return fmt.Errorf("RootCoord Unmarshal pb.AliasInfo err:%w", err)
		}
		mt.collAlias2ID[aliasInfo.Schema.Name] = aliasInfo.ID
	}

	log.Debug("reload meta table from KV successfully")
	return nil
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
func (mt *MetaTable) getAdditionKV(op func(ts typeutil.Timestamp) (string, error), meta map[string]string) func(ts typeutil.Timestamp) (string, string, error) {
|
2021-05-20 14:14:14 +08:00
|
|
|
if op == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
meta[DDMsgSendPrefix] = "false"
|
|
|
|
return func(ts typeutil.Timestamp) (string, string, error) {
|
|
|
|
val, err := op(ts)
|
|
|
|
if err != nil {
|
|
|
|
return "", "", err
|
|
|
|
}
|
|
|
|
return DDOperationPrefix, val, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AddTenant add tenant
|
|
|
|
func (mt *MetaTable) AddTenant(te *pb.TenantMeta, ts typeutil.Timestamp) error {
|
2021-04-08 17:31:39 +08:00
|
|
|
mt.tenantLock.Lock()
|
|
|
|
defer mt.tenantLock.Unlock()
|
|
|
|
|
|
|
|
k := fmt.Sprintf("%s/%d", TenantMetaPrefix, te.ID)
|
2021-09-23 10:37:54 +08:00
|
|
|
v, _ := proto.Marshal(te)
|
2021-04-08 17:31:39 +08:00
|
|
|
|
2021-09-23 10:37:54 +08:00
|
|
|
err := mt.client.Save(k, string(v), ts)
|
2021-05-18 14:18:02 +08:00
|
|
|
if err != nil {
|
2021-08-17 23:06:10 +08:00
|
|
|
log.Error("SnapShotKV Save fail", zap.Error(err))
|
|
|
|
panic("SnapShotKV Save fail")
|
2021-04-08 17:31:39 +08:00
|
|
|
}
|
|
|
|
mt.tenantID2Meta[te.ID] = *te
|
2021-08-18 14:36:10 +08:00
|
|
|
return nil
|
2021-04-08 17:31:39 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AddProxy add proxy
|
|
|
|
func (mt *MetaTable) AddProxy(po *pb.ProxyMeta, ts typeutil.Timestamp) error {
|
2021-04-08 17:31:39 +08:00
|
|
|
mt.proxyLock.Lock()
|
|
|
|
defer mt.proxyLock.Unlock()
|
|
|
|
|
|
|
|
k := fmt.Sprintf("%s/%d", ProxyMetaPrefix, po.ID)
|
2021-09-23 10:37:54 +08:00
|
|
|
v, _ := proto.Marshal(po)
|
2021-04-08 17:31:39 +08:00
|
|
|
|
2021-09-23 10:37:54 +08:00
|
|
|
err := mt.client.Save(k, string(v), ts)
|
2021-05-18 14:18:02 +08:00
|
|
|
if err != nil {
|
2021-08-17 23:06:10 +08:00
|
|
|
log.Error("SnapShotKV Save fail", zap.Error(err))
|
|
|
|
panic("SnapShotKV Save fail")
|
2021-04-08 17:31:39 +08:00
|
|
|
}
|
|
|
|
mt.proxyID2Meta[po.ID] = *po
|
2021-08-18 14:36:10 +08:00
|
|
|
return nil
|
2021-04-08 17:31:39 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AddCollection add collection
|
|
|
|
func (mt *MetaTable) AddCollection(coll *pb.CollectionInfo, ts typeutil.Timestamp, idx []*pb.IndexInfo, ddOpStr func(ts typeutil.Timestamp) (string, error)) error {
|
2021-01-19 14:44:03 +08:00
|
|
|
mt.ddLock.Lock()
|
|
|
|
defer mt.ddLock.Unlock()
|
2021-01-20 09:36:50 +08:00
|
|
|
|
2021-07-21 18:00:14 +08:00
|
|
|
if len(coll.PartitionIDs) != len(coll.PartitionNames) ||
|
|
|
|
len(coll.PartitionIDs) != len(coll.PartitionCreatedTimestamps) ||
|
2021-07-23 14:36:12 +08:00
|
|
|
(len(coll.PartitionIDs) != 1 && len(coll.PartitionIDs) != 0) {
|
2021-08-18 14:36:10 +08:00
|
|
|
return fmt.Errorf("PartitionIDs, PartitionNames and PartitionCreatedTimestmaps' length mis-match when creating collection")
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-01-22 15:41:54 +08:00
|
|
|
if _, ok := mt.collName2ID[coll.Schema.Name]; ok {
|
2021-08-18 14:36:10 +08:00
|
|
|
return fmt.Errorf("collection %s exist", coll.Schema.Name)
|
2021-01-22 15:41:54 +08:00
|
|
|
}
|
2021-02-11 08:41:59 +08:00
|
|
|
if len(coll.FieldIndexes) != len(idx) {
|
2021-08-18 14:36:10 +08:00
|
|
|
return fmt.Errorf("incorrect index id when creating collection")
|
2021-02-11 08:41:59 +08:00
|
|
|
}
|
2021-01-22 15:41:54 +08:00
|
|
|
|
2021-02-11 08:41:59 +08:00
|
|
|
for _, i := range idx {
|
|
|
|
mt.indexID2Meta[i.IndexID] = *i
|
|
|
|
}
|
2021-01-20 09:36:50 +08:00
|
|
|
|
2021-07-23 14:36:12 +08:00
|
|
|
meta := make(map[string]string)
|
2021-01-20 09:36:50 +08:00
|
|
|
|
2021-02-11 08:41:59 +08:00
|
|
|
for _, i := range idx {
|
2021-03-06 16:00:41 +08:00
|
|
|
k := fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, coll.ID, i.IndexID)
|
2021-09-23 10:37:54 +08:00
|
|
|
v, _ := proto.Marshal(i)
|
|
|
|
meta[k] = string(v)
|
2021-02-11 08:41:59 +08:00
|
|
|
}
|
|
|
|
|
2021-05-14 21:26:06 +08:00
|
|
|
// save ddOpStr into etcd
|
2021-05-20 14:14:14 +08:00
|
|
|
addition := mt.getAdditionKV(ddOpStr, meta)
|
2021-07-23 14:36:12 +08:00
|
|
|
saveColl := func(ts typeutil.Timestamp) (string, string, error) {
|
|
|
|
coll.CreateTime = ts
|
|
|
|
if len(coll.PartitionCreatedTimestamps) == 1 {
|
|
|
|
coll.PartitionCreatedTimestamps[0] = ts
|
|
|
|
}
|
|
|
|
mt.collID2Meta[coll.ID] = *coll
|
|
|
|
mt.collName2ID[coll.Schema.Name] = coll.ID
|
|
|
|
k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, coll.ID)
|
2021-09-23 10:37:54 +08:00
|
|
|
v1, _ := proto.Marshal(coll)
|
|
|
|
meta[k1] = string(v1)
|
|
|
|
return k1, string(v1), nil
|
2021-07-23 14:36:12 +08:00
|
|
|
}
|
|
|
|
|
2021-08-18 14:36:10 +08:00
|
|
|
err := mt.client.MultiSave(meta, ts, addition, saveColl)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-08-17 23:06:10 +08:00
|
|
|
log.Error("SnapShotKV MultiSave fail", zap.Error(err))
|
|
|
|
panic("SnapShotKV MultiSave fail")
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
2021-05-12 15:33:53 +08:00
|
|
|
|
2021-08-18 14:36:10 +08:00
|
|
|
return nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DeleteCollection delete collection
|
|
|
|
func (mt *MetaTable) DeleteCollection(collID typeutil.UniqueID, ts typeutil.Timestamp, ddOpStr func(ts typeutil.Timestamp) (string, error)) error {
|
2021-01-19 14:44:03 +08:00
|
|
|
mt.ddLock.Lock()
|
|
|
|
defer mt.ddLock.Unlock()
|
|
|
|
|
|
|
|
collMeta, ok := mt.collID2Meta[collID]
|
|
|
|
if !ok {
|
2021-08-18 14:36:10 +08:00
|
|
|
return fmt.Errorf("can't find collection. id = %d", collID)
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-01-20 09:36:50 +08:00
|
|
|
delete(mt.collID2Meta, collID)
|
|
|
|
delete(mt.collName2ID, collMeta.Schema.Name)
|
2021-07-03 20:00:40 +08:00
|
|
|
|
|
|
|
// update segID2IndexMeta
|
|
|
|
for partID := range collMeta.PartitionIDs {
|
|
|
|
if segIDMap, ok := mt.partID2SegID[typeutil.UniqueID(partID)]; ok {
|
|
|
|
for segID := range segIDMap {
|
|
|
|
delete(mt.segID2IndexMeta, segID)
|
|
|
|
}
|
2021-01-20 09:36:50 +08:00
|
|
|
}
|
|
|
|
}
|
2021-07-03 20:00:40 +08:00
|
|
|
|
|
|
|
// update partID2SegID
|
|
|
|
for partID := range collMeta.PartitionIDs {
|
|
|
|
delete(mt.partID2SegID, typeutil.UniqueID(partID))
|
|
|
|
}
|
|
|
|
|
2021-03-06 16:00:41 +08:00
|
|
|
for _, idxInfo := range collMeta.FieldIndexes {
|
|
|
|
_, ok := mt.indexID2Meta[idxInfo.IndexID]
|
|
|
|
if !ok {
|
|
|
|
log.Warn("index id not exist", zap.Int64("index id", idxInfo.IndexID))
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
delete(mt.indexID2Meta, idxInfo.IndexID)
|
|
|
|
}
|
2021-09-18 11:13:51 +08:00
|
|
|
var aliases []string
|
|
|
|
// delete collection aliases
|
|
|
|
for alias, cid := range mt.collAlias2ID {
|
|
|
|
if cid == collID {
|
|
|
|
aliases = append(aliases, alias)
|
|
|
|
}
|
|
|
|
}
|
2021-05-17 19:15:01 +08:00
|
|
|
|
2021-05-12 15:33:53 +08:00
|
|
|
delMetakeys := []string{
|
2021-03-06 16:00:41 +08:00
|
|
|
fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID),
|
|
|
|
fmt.Sprintf("%s/%d", SegmentIndexMetaPrefix, collID),
|
|
|
|
fmt.Sprintf("%s/%d", IndexMetaPrefix, collID),
|
|
|
|
}
|
2021-05-12 15:33:53 +08:00
|
|
|
|
2021-09-18 11:13:51 +08:00
|
|
|
for _, alias := range aliases {
|
|
|
|
delete(mt.collAlias2ID, alias)
|
|
|
|
delMetakeys = append(delMetakeys,
|
|
|
|
fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2021-05-14 21:26:06 +08:00
|
|
|
// save ddOpStr into etcd
|
2021-05-20 14:14:14 +08:00
|
|
|
var saveMeta = map[string]string{}
|
|
|
|
addition := mt.getAdditionKV(ddOpStr, saveMeta)
|
2021-08-18 14:36:10 +08:00
|
|
|
err := mt.client.MultiSaveAndRemoveWithPrefix(saveMeta, delMetakeys, ts, addition)
|
2021-01-19 14:44:03 +08:00
|
|
|
if err != nil {
|
2021-08-17 23:06:10 +08:00
|
|
|
log.Error("SnapShotKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err))
|
|
|
|
panic("SnapShotKV MultiSaveAndRemoveWithPrefix fail")
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-08-18 14:36:10 +08:00
|
|
|
return nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// HasCollection return collection existence
|
|
|
|
func (mt *MetaTable) HasCollection(collID typeutil.UniqueID, ts typeutil.Timestamp) bool {
|
2021-01-19 14:44:03 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
2021-05-18 14:18:02 +08:00
|
|
|
if ts == 0 {
|
|
|
|
_, ok := mt.collID2Meta[collID]
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
key := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
|
|
|
|
_, err := mt.client.Load(key, ts)
|
|
|
|
return err == nil
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetCollectionByID return collection meta by collection id
|
|
|
|
func (mt *MetaTable) GetCollectionByID(collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*pb.CollectionInfo, error) {
|
2021-01-21 10:01:29 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
|
|
|
|
2021-05-18 14:18:02 +08:00
|
|
|
if ts == 0 {
|
|
|
|
col, ok := mt.collID2Meta[collectionID]
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("can't find collection id : %d", collectionID)
|
|
|
|
}
|
|
|
|
colCopy := proto.Clone(&col)
|
|
|
|
return colCopy.(*pb.CollectionInfo), nil
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
2021-05-18 14:18:02 +08:00
|
|
|
key := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionID)
|
|
|
|
val, err := mt.client.Load(key, ts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
colMeta := pb.CollectionInfo{}
|
2021-09-23 10:37:54 +08:00
|
|
|
err = proto.Unmarshal([]byte(val), &colMeta)
|
2021-05-18 14:18:02 +08:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &colMeta, nil
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetCollectionByName return collection meta by collection name
|
|
|
|
func (mt *MetaTable) GetCollectionByName(collectionName string, ts typeutil.Timestamp) (*pb.CollectionInfo, error) {
|
2021-01-19 14:44:03 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
|
|
|
|
2021-05-18 14:18:02 +08:00
|
|
|
if ts == 0 {
|
|
|
|
vid, ok := mt.collName2ID[collectionName]
|
|
|
|
if !ok {
|
2021-09-18 11:13:51 +08:00
|
|
|
if vid, ok = mt.collAlias2ID[collectionName]; !ok {
|
|
|
|
return nil, fmt.Errorf("can't find collection: " + collectionName)
|
|
|
|
}
|
2021-05-18 14:18:02 +08:00
|
|
|
}
|
|
|
|
col, ok := mt.collID2Meta[vid]
|
|
|
|
if !ok {
|
2021-09-13 11:56:42 +08:00
|
|
|
return nil, fmt.Errorf("can't find collection %s with id %d", collectionName, vid)
|
2021-05-18 14:18:02 +08:00
|
|
|
}
|
|
|
|
colCopy := proto.Clone(&col)
|
|
|
|
return colCopy.(*pb.CollectionInfo), nil
|
2021-02-02 10:09:10 +08:00
|
|
|
}
|
2021-05-18 14:18:02 +08:00
|
|
|
_, vals, err := mt.client.LoadWithPrefix(CollectionMetaPrefix, ts)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2021-02-02 10:09:10 +08:00
|
|
|
}
|
2021-05-18 14:18:02 +08:00
|
|
|
for _, val := range vals {
|
|
|
|
collMeta := pb.CollectionInfo{}
|
2021-09-23 10:37:54 +08:00
|
|
|
err = proto.Unmarshal([]byte(val), &collMeta)
|
2021-05-18 14:18:02 +08:00
|
|
|
if err != nil {
|
|
|
|
log.Debug("unmarshal collection info failed", zap.Error(err))
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if collMeta.Schema.Name == collectionName {
|
|
|
|
return &collMeta, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("can't find collection: %s, at timestamp = %d", collectionName, ts)
|
2021-02-02 10:09:10 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ListCollections list all collection names
|
|
|
|
func (mt *MetaTable) ListCollections(ts typeutil.Timestamp) (map[string]*pb.CollectionInfo, error) {
|
2021-01-19 14:44:03 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
2021-07-21 18:00:14 +08:00
|
|
|
colls := make(map[string]*pb.CollectionInfo)
|
2021-01-19 14:44:03 +08:00
|
|
|
|
2021-05-18 14:18:02 +08:00
|
|
|
if ts == 0 {
|
2021-07-21 18:00:14 +08:00
|
|
|
for collName, collID := range mt.collName2ID {
|
|
|
|
coll := mt.collID2Meta[collID]
|
|
|
|
colCopy := proto.Clone(&coll)
|
|
|
|
colls[collName] = colCopy.(*pb.CollectionInfo)
|
2021-06-26 22:26:20 +08:00
|
|
|
}
|
|
|
|
return colls, nil
|
2021-05-18 14:18:02 +08:00
|
|
|
}
|
|
|
|
_, vals, err := mt.client.LoadWithPrefix(CollectionMetaPrefix, ts)
|
|
|
|
if err != nil {
|
2021-05-18 17:12:17 +08:00
|
|
|
log.Debug("load with prefix error", zap.Uint64("timestamp", ts), zap.Error(err))
|
2021-06-03 19:09:33 +08:00
|
|
|
return nil, nil
|
2021-05-18 14:18:02 +08:00
|
|
|
}
|
|
|
|
for _, val := range vals {
|
|
|
|
collMeta := pb.CollectionInfo{}
|
2021-09-23 10:37:54 +08:00
|
|
|
err := proto.Unmarshal([]byte(val), &collMeta)
|
2021-05-18 14:18:02 +08:00
|
|
|
if err != nil {
|
|
|
|
log.Debug("unmarshal collection info failed", zap.Error(err))
|
|
|
|
}
|
2021-07-21 18:00:14 +08:00
|
|
|
colls[collMeta.Schema.Name] = &collMeta
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
return colls, nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ListAliases list all collection aliases
|
|
|
|
func (mt *MetaTable) ListAliases(collID typeutil.UniqueID) []string {
|
2021-09-22 16:20:48 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
|
|
|
var aliases []string
|
|
|
|
for alias, cid := range mt.collAlias2ID {
|
|
|
|
if cid == collID {
|
|
|
|
aliases = append(aliases, alias)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return aliases
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ListCollectionVirtualChannels list virtual channels of all collections
|
|
|
|
func (mt *MetaTable) ListCollectionVirtualChannels() []string {
|
2021-06-04 15:00:34 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
|
|
|
vlist := []string{}
|
|
|
|
|
|
|
|
for _, c := range mt.collID2Meta {
|
|
|
|
vlist = append(vlist, c.VirtualChannelNames...)
|
|
|
|
}
|
|
|
|
return vlist
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// ListCollectionPhysicalChannels list physical channels of all collections
|
|
|
|
func (mt *MetaTable) ListCollectionPhysicalChannels() []string {
|
2021-06-04 15:00:34 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
|
|
|
plist := []string{}
|
|
|
|
|
|
|
|
for _, c := range mt.collID2Meta {
|
|
|
|
plist = append(plist, c.PhysicalChannelNames...)
|
|
|
|
}
|
|
|
|
return plist
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AddPartition add partition. The updated collection meta (with the new
// partition appended) and an optional DD-operation record are written to the
// kv store in one MultiSave at timestamp ts; the in-memory cache is updated
// inside the save callback. A kv failure panics.
func (mt *MetaTable) AddPartition(collID typeutil.UniqueID, partitionName string, partitionID typeutil.UniqueID, ts typeutil.Timestamp, ddOpStr func(ts typeutil.Timestamp) (string, error)) error {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()
	coll, ok := mt.collID2Meta[collID]
	if !ok {
		return fmt.Errorf("can't find collection. id = %d", collID)
	}

	// number of partition tags (except _default) should be limited to 4096 by default
	if int64(len(coll.PartitionIDs)) >= Params.MaxPartitionNum {
		return fmt.Errorf("maximum partition's number should be limit to %d", Params.MaxPartitionNum)
	}

	// The three partition slices must stay parallel; bail out on any skew.
	if len(coll.PartitionIDs) != len(coll.PartitionNames) {
		return fmt.Errorf("len(coll.PartitionIDs)=%d, len(coll.PartitionNames)=%d", len(coll.PartitionIDs), len(coll.PartitionNames))
	}

	if len(coll.PartitionIDs) != len(coll.PartitionCreatedTimestamps) {
		return fmt.Errorf("len(coll.PartitionIDs)=%d, len(coll.PartitionCreatedTimestamps)=%d", len(coll.PartitionIDs), len(coll.PartitionCreatedTimestamps))
	}

	if len(coll.PartitionNames) != len(coll.PartitionCreatedTimestamps) {
		return fmt.Errorf("len(coll.PartitionNames)=%d, len(coll.PartitionCreatedTimestamps)=%d", len(coll.PartitionNames), len(coll.PartitionCreatedTimestamps))
	}

	// Reject duplicates by id or by name.
	for idx := range coll.PartitionIDs {
		if coll.PartitionIDs[idx] == partitionID {
			return fmt.Errorf("partition id = %d already exists", partitionID)
		}
		if coll.PartitionNames[idx] == partitionName {
			return fmt.Errorf("partition name = %s already exists", partitionName)
		}
		// no necessary to check created timestamp
	}
	meta := make(map[string]string)

	// save ddOpStr into etcd
	addition := mt.getAdditionKV(ddOpStr, meta)

	// saveColl runs inside MultiSave with the final timestamp: it appends the
	// new partition to the (local copy of the) meta, refreshes the cache and
	// emits the updated collection kv pair.
	saveColl := func(ts typeutil.Timestamp) (string, string, error) {
		coll.PartitionIDs = append(coll.PartitionIDs, partitionID)
		coll.PartitionNames = append(coll.PartitionNames, partitionName)
		coll.PartitionCreatedTimestamps = append(coll.PartitionCreatedTimestamps, ts)
		mt.collID2Meta[collID] = coll

		k1 := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
		v1, _ := proto.Marshal(&coll)
		meta[k1] = string(v1)

		return k1, string(v1), nil
	}

	err := mt.client.MultiSave(meta, ts, addition, saveColl)
	if err != nil {
		log.Error("SnapShotKV MultiSave fail", zap.Error(err))
		panic("SnapShotKV MultiSave fail")
	}
	return nil
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetPartitionNameByID return partition name by partition id.
//
// With ts == 0 the lookup is served from the in-memory cache under ddLock;
// otherwise the collection meta is loaded from the kv store at that
// timestamp (that path touches only the kv client, so no lock is taken).
func (mt *MetaTable) GetPartitionNameByID(collID, partitionID typeutil.UniqueID, ts typeutil.Timestamp) (string, error) {
	if ts == 0 {
		mt.ddLock.RLock()
		defer mt.ddLock.RUnlock()
		collMeta, ok := mt.collID2Meta[collID]
		if !ok {
			return "", fmt.Errorf("can't find collection id = %d", collID)
		}
		// PartitionIDs and PartitionNames are parallel slices.
		for idx := range collMeta.PartitionIDs {
			if collMeta.PartitionIDs[idx] == partitionID {
				return collMeta.PartitionNames[idx], nil
			}
		}
		return "", fmt.Errorf("partition %d does not exist", partitionID)
	}
	// Historical lookup straight from the kv store.
	collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
	collVal, err := mt.client.Load(collKey, ts)
	if err != nil {
		return "", err
	}
	collMeta := pb.CollectionInfo{}
	err = proto.Unmarshal([]byte(collVal), &collMeta)
	if err != nil {
		return "", err
	}
	for idx := range collMeta.PartitionIDs {
		if collMeta.PartitionIDs[idx] == partitionID {
			return collMeta.PartitionNames[idx], nil
		}
	}
	return "", fmt.Errorf("partition %d does not exist", partitionID)
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// getPartitionByName resolves a partition name to its partition ID.
//
// With ts == 0 it reads the in-memory cache; with a non-zero ts it loads the
// collection meta from the snapshot KV store at that timestamp. Unlike the
// exported wrapper GetPartitionByName, this helper does NOT acquire ddLock —
// callers are expected to hold it when using the ts == 0 (cache) path.
func (mt *MetaTable) getPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) {
	if ts == 0 {
		collMeta, ok := mt.collID2Meta[collID]
		if !ok {
			return 0, fmt.Errorf("can't find collection id = %d", collID)
		}
		// PartitionNames and PartitionIDs are parallel slices indexed together.
		for idx := range collMeta.PartitionIDs {
			if collMeta.PartitionNames[idx] == partitionName {
				return collMeta.PartitionIDs[idx], nil
			}
		}
		return 0, fmt.Errorf("partition %s does not exist", partitionName)
	}
	// Historical view: decode the collection meta stored at ts.
	collKey := fmt.Sprintf("%s/%d", CollectionMetaPrefix, collID)
	collVal, err := mt.client.Load(collKey, ts)
	if err != nil {
		return 0, err
	}
	collMeta := pb.CollectionInfo{}
	err = proto.Unmarshal([]byte(collVal), &collMeta)
	if err != nil {
		return 0, err
	}
	for idx := range collMeta.PartitionIDs {
		if collMeta.PartitionNames[idx] == partitionName {
			return collMeta.PartitionIDs[idx], nil
		}
	}
	return 0, fmt.Errorf("partition %s does not exist", partitionName)
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetPartitionByName return partition id by partition name
|
|
|
|
func (mt *MetaTable) GetPartitionByName(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error) {
|
2021-05-14 21:26:06 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
2021-05-18 14:18:02 +08:00
|
|
|
return mt.getPartitionByName(collID, partitionName, ts)
|
2021-01-19 14:44:03 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// HasPartition check partition existence
|
|
|
|
func (mt *MetaTable) HasPartition(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp) bool {
|
2021-05-14 21:26:06 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
2021-05-18 14:18:02 +08:00
|
|
|
_, err := mt.getPartitionByName(collID, partitionName, ts)
|
2021-05-14 21:26:06 +08:00
|
|
|
return err == nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DeletePartition delete partition.
//
// It removes partitionName from collection collID: the partition entry is
// dropped from the cached collection meta (IDs/names/created-timestamps are
// parallel slices and are rebuilt together), the per-partition segment index
// caches are purged, and the updated meta plus index-key deletions are
// persisted to the snapshot KV store at ts. ddOpStr supplies an extra DD
// operation record to persist in the same transaction.
//
// Returns the ID of the deleted partition. Panics if the KV write fails.
func (mt *MetaTable) DeletePartition(collID typeutil.UniqueID, partitionName string, ts typeutil.Timestamp, ddOpStr func(ts typeutil.Timestamp) (string, error)) (typeutil.UniqueID, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

	// The default partition is permanent and may never be removed.
	if partitionName == Params.DefaultPartitionName {
		return 0, fmt.Errorf("default partition cannot be deleted")
	}

	collMeta, ok := mt.collID2Meta[collID]
	if !ok {
		return 0, fmt.Errorf("can't find collection id = %d", collID)
	}

	// check tag exists
	exist := false

	// Rebuild the three parallel slices, filtering out the target partition.
	pd := make([]typeutil.UniqueID, 0, len(collMeta.PartitionIDs))
	pn := make([]string, 0, len(collMeta.PartitionNames))
	pts := make([]uint64, 0, len(collMeta.PartitionCreatedTimestamps))
	var partID typeutil.UniqueID
	for idx := range collMeta.PartitionIDs {
		if collMeta.PartitionNames[idx] == partitionName {
			partID = collMeta.PartitionIDs[idx]
			exist = true
		} else {
			pd = append(pd, collMeta.PartitionIDs[idx])
			pn = append(pn, collMeta.PartitionNames[idx])
			pts = append(pts, collMeta.PartitionCreatedTimestamps[idx])
		}
	}
	if !exist {
		return 0, fmt.Errorf("partition %s does not exist", partitionName)
	}
	collMeta.PartitionIDs = pd
	collMeta.PartitionNames = pn
	collMeta.PartitionCreatedTimestamps = pts
	mt.collID2Meta[collID] = collMeta

	// update segID2IndexMeta and partID2SegID
	// Drop cached segment index meta for every segment of the deleted partition.
	if segIDMap, ok := mt.partID2SegID[partID]; ok {
		for segID := range segIDMap {
			delete(mt.segID2IndexMeta, segID)
		}
	}
	delete(mt.partID2SegID, partID)

	// Persist the shrunken collection meta and delete the per-partition
	// segment-index keys for every field index in one KV transaction.
	k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collID, 10))
	v, _ := proto.Marshal(&collMeta)
	meta := map[string]string{k: string(v)}
	delMetaKeys := []string{}
	for _, idxInfo := range collMeta.FieldIndexes {
		k := fmt.Sprintf("%s/%d/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, idxInfo.IndexID, partID)
		delMetaKeys = append(delMetaKeys, k)
	}

	// save ddOpStr into etcd
	addition := mt.getAdditionKV(ddOpStr, meta)

	err := mt.client.MultiSaveAndRemoveWithPrefix(meta, delMetaKeys, ts, addition)
	if err != nil {
		// In-memory state was already mutated; a failed persist leaves meta
		// inconsistent, so the process panics rather than continuing.
		log.Error("SnapShotKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err))
		panic("SnapShotKV MultiSaveAndRemoveWithPrefix fail")
	}
	return partID, nil
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AddIndex add index.
//
// It records segIdxInfo (a per-segment index build record) in the in-memory
// caches (segID2IndexMeta, partID2SegID) and persists it to the snapshot KV
// store at ts. The index ID must already be registered on the collection's
// FieldIndexes. Re-adding an identical record (same content and BuildID) is a
// no-op; an equal record with a different BuildID is rejected as a conflict.
// Panics if the KV save fails.
func (mt *MetaTable) AddIndex(segIdxInfo *pb.SegmentIndexInfo, ts typeutil.Timestamp) error {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

	collMeta, ok := mt.collID2Meta[segIdxInfo.CollectionID]
	if !ok {
		return fmt.Errorf("collection id = %d not found", segIdxInfo.CollectionID)
	}
	// The index must be declared on the collection before segment records
	// can be attached to it.
	exist := false
	for _, fidx := range collMeta.FieldIndexes {
		if fidx.IndexID == segIdxInfo.IndexID {
			exist = true
			break
		}
	}
	if !exist {
		return fmt.Errorf("index id = %d not found", segIdxInfo.IndexID)
	}

	segIdxMap, ok := mt.segID2IndexMeta[segIdxInfo.SegmentID]
	if !ok {
		// First index record for this segment: create both cache entries.
		idxMap := map[typeutil.UniqueID]pb.SegmentIndexInfo{segIdxInfo.IndexID: *segIdxInfo}
		mt.segID2IndexMeta[segIdxInfo.SegmentID] = idxMap

		segIDMap := map[typeutil.UniqueID]bool{segIdxInfo.SegmentID: true}
		mt.partID2SegID[segIdxInfo.PartitionID] = segIDMap
	} else {
		tmpInfo, ok := segIdxMap[segIdxInfo.IndexID]
		if ok {
			if SegmentIndexInfoEqual(segIdxInfo, &tmpInfo) {
				if segIdxInfo.BuildID == tmpInfo.BuildID {
					// Exact duplicate — nothing to do.
					log.Debug("Identical SegmentIndexInfo already exist", zap.Int64("IndexID", segIdxInfo.IndexID))
					return nil
				}
				// Same index info but a different build: treat as a conflict.
				return fmt.Errorf("index id = %d exist", segIdxInfo.IndexID)
			}
		}
	}

	// Insert/overwrite the cache entries (maps are guaranteed to exist by the
	// branch above for new segments; NOTE(review): for an existing segment
	// this assumes partID2SegID already has an entry for the partition —
	// confirm that invariant holds for all callers).
	mt.segID2IndexMeta[segIdxInfo.SegmentID][segIdxInfo.IndexID] = *segIdxInfo
	mt.partID2SegID[segIdxInfo.PartitionID][segIdxInfo.SegmentID] = true

	// Key layout: prefix/collID/indexID/partID/segID.
	k := fmt.Sprintf("%s/%d/%d/%d/%d", SegmentIndexMetaPrefix, segIdxInfo.CollectionID, segIdxInfo.IndexID, segIdxInfo.PartitionID, segIdxInfo.SegmentID)
	v, _ := proto.Marshal(segIdxInfo)

	err := mt.client.Save(k, string(v), ts)
	if err != nil {
		// Cache was already mutated; failing to persist leaves state
		// inconsistent, so panic instead of returning.
		log.Error("SnapShotKV Save fail", zap.Error(err))
		panic("SnapShotKV Save fail")
	}

	return nil
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DropIndex drop index
|
|
|
|
func (mt *MetaTable) DropIndex(collName, fieldName, indexName string, ts typeutil.Timestamp) (typeutil.UniqueID, bool, error) {
|
2021-02-20 15:38:44 +08:00
|
|
|
mt.ddLock.Lock()
|
|
|
|
defer mt.ddLock.Unlock()
|
|
|
|
|
|
|
|
collID, ok := mt.collName2ID[collName]
|
|
|
|
if !ok {
|
2021-08-18 14:36:10 +08:00
|
|
|
return 0, false, fmt.Errorf("collection name = %s not exist", collName)
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
collMeta, ok := mt.collID2Meta[collID]
|
|
|
|
if !ok {
|
2021-08-18 14:36:10 +08:00
|
|
|
return 0, false, fmt.Errorf("collection name = %s not has meta", collName)
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
fieldSch, err := mt.unlockGetFieldSchema(collName, fieldName)
|
|
|
|
if err != nil {
|
2021-08-18 14:36:10 +08:00
|
|
|
return 0, false, err
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
fieldIdxInfo := make([]*pb.FieldIndexInfo, 0, len(collMeta.FieldIndexes))
|
|
|
|
var dropIdxID typeutil.UniqueID
|
|
|
|
for i, info := range collMeta.FieldIndexes {
|
|
|
|
if info.FiledID != fieldSch.FieldID {
|
|
|
|
fieldIdxInfo = append(fieldIdxInfo, info)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
idxMeta, ok := mt.indexID2Meta[info.IndexID]
|
|
|
|
if !ok {
|
|
|
|
fieldIdxInfo = append(fieldIdxInfo, info)
|
2021-02-24 16:25:40 +08:00
|
|
|
log.Warn("index id not has meta", zap.Int64("index id", info.IndexID))
|
2021-02-20 15:38:44 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
if idxMeta.IndexName != indexName {
|
|
|
|
fieldIdxInfo = append(fieldIdxInfo, info)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
dropIdxID = info.IndexID
|
|
|
|
fieldIdxInfo = append(fieldIdxInfo, collMeta.FieldIndexes[i+1:]...)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if len(fieldIdxInfo) == len(collMeta.FieldIndexes) {
|
2021-02-24 16:25:40 +08:00
|
|
|
log.Warn("drop index,index not found", zap.String("collection name", collName), zap.String("filed name", fieldName), zap.String("index name", indexName))
|
2021-08-18 14:36:10 +08:00
|
|
|
return 0, false, nil
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
collMeta.FieldIndexes = fieldIdxInfo
|
|
|
|
mt.collID2Meta[collID] = collMeta
|
2021-09-23 10:37:54 +08:00
|
|
|
k := path.Join(CollectionMetaPrefix, strconv.FormatInt(collID, 10))
|
|
|
|
v, _ := proto.Marshal(&collMeta)
|
|
|
|
saveMeta := map[string]string{k: string(v)}
|
2021-02-20 15:38:44 +08:00
|
|
|
|
|
|
|
delete(mt.indexID2Meta, dropIdxID)
|
|
|
|
|
2021-07-03 20:00:40 +08:00
|
|
|
// update segID2IndexMeta
|
|
|
|
for partID := range collMeta.PartitionIDs {
|
|
|
|
if segIDMap, ok := mt.partID2SegID[typeutil.UniqueID(partID)]; ok {
|
|
|
|
for segID := range segIDMap {
|
|
|
|
if segIndexInfos, ok := mt.segID2IndexMeta[segID]; ok {
|
|
|
|
delete(segIndexInfos, dropIdxID)
|
|
|
|
}
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-07-03 20:00:40 +08:00
|
|
|
|
2021-03-06 16:00:41 +08:00
|
|
|
delMeta := []string{
|
|
|
|
fmt.Sprintf("%s/%d/%d", SegmentIndexMetaPrefix, collMeta.ID, dropIdxID),
|
|
|
|
fmt.Sprintf("%s/%d/%d", IndexMetaPrefix, collMeta.ID, dropIdxID),
|
|
|
|
}
|
2021-02-20 15:38:44 +08:00
|
|
|
|
2021-08-18 14:36:10 +08:00
|
|
|
err = mt.client.MultiSaveAndRemoveWithPrefix(saveMeta, delMeta, ts)
|
2021-02-20 15:38:44 +08:00
|
|
|
if err != nil {
|
2021-08-17 23:06:10 +08:00
|
|
|
log.Error("SnapShotKV MultiSaveAndRemoveWithPrefix fail", zap.Error(err))
|
|
|
|
panic("SnapShotKV MultiSaveAndRemoveWithPrefix fail")
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
|
2021-08-18 14:36:10 +08:00
|
|
|
return dropIdxID, true, nil
|
2021-02-20 15:38:44 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetSegmentIndexInfoByID return segment index info by segment id.
//
// Lookup rules:
//   - If the segment has no cached index meta at all, a zero-valued record
//     with EnableIndex=false is returned with a nil error (a sentinel, not a
//     failure).
//   - If filedID == -1 and idxName == "", the record belonging to the index
//     named Params.DefaultIndexName is returned.
//   - Otherwise the record matching both idxName and filedID is returned.
//
// NOTE(review): "filedID" looks like a typo of "fieldID" that is pervasive in
// this file (it is compared against SegmentIndexInfo.FieldID).
func (mt *MetaTable) GetSegmentIndexInfoByID(segID typeutil.UniqueID, filedID int64, idxName string) (pb.SegmentIndexInfo, error) {
	mt.ddLock.RLock()
	defer mt.ddLock.RUnlock()

	segIdxMap, ok := mt.segID2IndexMeta[segID]
	if !ok {
		// Unknown segment: report "no index" via the sentinel record.
		return pb.SegmentIndexInfo{
			SegmentID:   segID,
			FieldID:     filedID,
			IndexID:     0,
			BuildID:     0,
			EnableIndex: false,
		}, nil
	}
	// Known segment with an empty record set is treated as an error.
	if len(segIdxMap) == 0 {
		return pb.SegmentIndexInfo{}, fmt.Errorf("segment id %d not has any index", segID)
	}

	if filedID == -1 && idxName == "" { // return default index
		for _, seg := range segIdxMap {
			info, ok := mt.indexID2Meta[seg.IndexID]
			if ok && info.IndexName == Params.DefaultIndexName {
				return seg, nil
			}
		}
	} else {
		// Match on index name first, then on field id.
		for idxID, seg := range segIdxMap {
			idxMeta, ok := mt.indexID2Meta[idxID]
			if ok {
				if idxMeta.IndexName != idxName {
					continue
				}
				if seg.FieldID != filedID {
					continue
				}
				return seg, nil
			}
		}
	}
	return pb.SegmentIndexInfo{}, fmt.Errorf("can't find index name = %s on segment = %d, with filed id = %d", idxName, segID, filedID)
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetFieldSchema return field schema
|
|
|
|
func (mt *MetaTable) GetFieldSchema(collName string, fieldName string) (schemapb.FieldSchema, error) {
|
2021-01-21 10:01:29 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
|
|
|
|
2021-02-02 10:09:10 +08:00
|
|
|
return mt.unlockGetFieldSchema(collName, fieldName)
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
func (mt *MetaTable) unlockGetFieldSchema(collName string, fieldName string) (schemapb.FieldSchema, error) {
|
2021-01-21 10:01:29 +08:00
|
|
|
collID, ok := mt.collName2ID[collName]
|
|
|
|
if !ok {
|
2021-03-05 10:15:27 +08:00
|
|
|
return schemapb.FieldSchema{}, fmt.Errorf("collection %s not found", collName)
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
collMeta, ok := mt.collID2Meta[collID]
|
|
|
|
if !ok {
|
2021-03-05 10:15:27 +08:00
|
|
|
return schemapb.FieldSchema{}, fmt.Errorf("collection %s not found", collName)
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, field := range collMeta.Schema.Fields {
|
|
|
|
if field.Name == fieldName {
|
|
|
|
return *field, nil
|
|
|
|
}
|
|
|
|
}
|
2021-03-05 10:15:27 +08:00
|
|
|
return schemapb.FieldSchema{}, fmt.Errorf("collection %s doesn't have filed %s", collName, fieldName)
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// IsSegmentIndexed check if segment has index
|
|
|
|
func (mt *MetaTable) IsSegmentIndexed(segID typeutil.UniqueID, fieldSchema *schemapb.FieldSchema, indexParams []*commonpb.KeyValuePair) bool {
|
2021-01-21 10:01:29 +08:00
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
2021-02-02 10:09:10 +08:00
|
|
|
return mt.unlockIsSegmentIndexed(segID, fieldSchema, indexParams)
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
func (mt *MetaTable) unlockIsSegmentIndexed(segID typeutil.UniqueID, fieldSchema *schemapb.FieldSchema, indexParams []*commonpb.KeyValuePair) bool {
|
2021-01-21 10:01:29 +08:00
|
|
|
segIdx, ok := mt.segID2IndexMeta[segID]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
exist := false
|
2021-07-03 14:36:18 +08:00
|
|
|
for idxID, meta := range segIdx {
|
2021-01-21 10:01:29 +08:00
|
|
|
if meta.FieldID != fieldSchema.FieldID {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
idxMeta, ok := mt.indexID2Meta[idxID]
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if EqualKeyPairArray(indexParams, idxMeta.IndexParams) {
|
|
|
|
exist = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return exist
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetNotIndexedSegments return segment ids which have no index.
//
// Given a desired index description idxInfo for field fieldName of collection
// collName, it (a) registers the index on the collection if no index with
// equal parameters exists on that field yet (renaming any other index that
// already uses idxInfo.IndexName by appending "_bak"), or (b) reuses the
// existing equal-parameter index, adopting its IndexID into idxInfo and
// renaming it if the requested name differs. It then returns the subset of
// segIDs not yet indexed with those parameters, plus the field schema.
//
// Side effects: may mutate idxInfo.IndexID, the cached collection/index meta,
// and the snapshot KV store at ts. Panics if a KV write fails.
func (mt *MetaTable) GetNotIndexedSegments(collName string, fieldName string, idxInfo *pb.IndexInfo, segIDs []typeutil.UniqueID, ts typeutil.Timestamp) ([]typeutil.UniqueID, schemapb.FieldSchema, error) {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()

	if idxInfo.IndexParams == nil {
		return nil, schemapb.FieldSchema{}, fmt.Errorf("index param is nil")
	}
	collID, ok := mt.collName2ID[collName]
	if !ok {
		return nil, schemapb.FieldSchema{}, fmt.Errorf("collection %s not found", collName)
	}
	collMeta, ok := mt.collID2Meta[collID]
	if !ok {
		return nil, schemapb.FieldSchema{}, fmt.Errorf("collection %s not found", collName)
	}
	fieldSchema, err := mt.unlockGetFieldSchema(collName, fieldName)
	if err != nil {
		return nil, fieldSchema, err
	}

	// dupIdx: an already-registered index (on any field) that occupies the
	// requested index name; it will be renamed with a "_bak" suffix below.
	var dupIdx typeutil.UniqueID = 0
	for _, f := range collMeta.FieldIndexes {
		if info, ok := mt.indexID2Meta[f.IndexID]; ok {
			if info.IndexName == idxInfo.IndexName {
				dupIdx = info.IndexID
				break
			}
		}
	}

	// Does this field already carry an index with equal parameters?
	exist := false
	var existInfo pb.IndexInfo
	for _, f := range collMeta.FieldIndexes {
		if f.FiledID == fieldSchema.FieldID {
			existInfo, ok = mt.indexID2Meta[f.IndexID]
			if !ok {
				return nil, schemapb.FieldSchema{}, fmt.Errorf("index id = %d not found", f.IndexID)
			}
			if EqualKeyPairArray(existInfo.IndexParams, idxInfo.IndexParams) {
				exist = true
				break
			}
		}
	}
	if !exist {
		// Register the new index on the collection and persist both the
		// collection meta and the index meta (plus any "_bak" rename).
		idx := &pb.FieldIndexInfo{
			FiledID: fieldSchema.FieldID,
			IndexID: idxInfo.IndexID,
		}
		collMeta.FieldIndexes = append(collMeta.FieldIndexes, idx)
		mt.collID2Meta[collMeta.ID] = collMeta
		k1 := path.Join(CollectionMetaPrefix, strconv.FormatInt(collMeta.ID, 10))
		v1, _ := proto.Marshal(&collMeta)

		mt.indexID2Meta[idx.IndexID] = *idxInfo
		k2 := path.Join(IndexMetaPrefix, strconv.FormatInt(idx.IndexID, 10))
		v2, _ := proto.Marshal(idxInfo)
		meta := map[string]string{k1: string(v1), k2: string(v2)}

		if dupIdx != 0 {
			dupInfo := mt.indexID2Meta[dupIdx]
			dupInfo.IndexName = dupInfo.IndexName + "_bak"
			mt.indexID2Meta[dupIdx] = dupInfo
			k := path.Join(IndexMetaPrefix, strconv.FormatInt(dupInfo.IndexID, 10))
			v, _ := proto.Marshal(&dupInfo)
			meta[k] = string(v)
		}
		err = mt.client.MultiSave(meta, ts)
		if err != nil {
			// Caches were already mutated; persist failure leaves state
			// inconsistent, hence the panic.
			log.Error("SnapShotKV MultiSave fail", zap.Error(err))
			panic("SnapShotKV MultiSave fail")
		}
	} else {
		// Reuse the equal-parameter index: adopt its ID into idxInfo so the
		// caller and the segment scan below operate on the existing index.
		idxInfo.IndexID = existInfo.IndexID
		if existInfo.IndexName != idxInfo.IndexName { //replace index name
			existInfo.IndexName = idxInfo.IndexName
			mt.indexID2Meta[existInfo.IndexID] = existInfo
			k := path.Join(IndexMetaPrefix, strconv.FormatInt(existInfo.IndexID, 10))
			v, _ := proto.Marshal(&existInfo)
			meta := map[string]string{k: string(v)}
			if dupIdx != 0 {
				dupInfo := mt.indexID2Meta[dupIdx]
				dupInfo.IndexName = dupInfo.IndexName + "_bak"
				mt.indexID2Meta[dupIdx] = dupInfo
				k := path.Join(IndexMetaPrefix, strconv.FormatInt(dupInfo.IndexID, 10))
				v, _ := proto.Marshal(&dupInfo)
				meta[k] = string(v)
			}

			err = mt.client.MultiSave(meta, ts)
			if err != nil {
				log.Error("SnapShotKV MultiSave fail", zap.Error(err))
				panic("SnapShotKV MultiSave fail")
			}
		}
	}

	// Collect the segments that still lack an index with these parameters.
	rstID := make([]typeutil.UniqueID, 0, 16)
	for _, segID := range segIDs {
		if exist := mt.unlockIsSegmentIndexed(segID, &fieldSchema, idxInfo.IndexParams); !exist {
			rstID = append(rstID, segID)
		}
	}
	return rstID, fieldSchema, nil
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetIndexByName return index info by index name
|
|
|
|
func (mt *MetaTable) GetIndexByName(collName, indexName string) (pb.CollectionInfo, []pb.IndexInfo, error) {
|
2021-01-21 10:01:29 +08:00
|
|
|
mt.ddLock.RLock()
|
2021-02-08 15:19:48 +08:00
|
|
|
defer mt.ddLock.RUnlock()
|
2021-01-21 10:01:29 +08:00
|
|
|
|
|
|
|
collID, ok := mt.collName2ID[collName]
|
|
|
|
if !ok {
|
2021-04-27 10:30:55 +08:00
|
|
|
return pb.CollectionInfo{}, nil, fmt.Errorf("collection %s not found", collName)
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
collMeta, ok := mt.collID2Meta[collID]
|
|
|
|
if !ok {
|
2021-04-27 10:30:55 +08:00
|
|
|
return pb.CollectionInfo{}, nil, fmt.Errorf("collection %s not found", collName)
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
|
2021-02-11 08:41:59 +08:00
|
|
|
rstIndex := make([]pb.IndexInfo, 0, len(collMeta.FieldIndexes))
|
2021-02-19 09:52:06 +08:00
|
|
|
for _, idx := range collMeta.FieldIndexes {
|
2021-04-26 15:53:47 +08:00
|
|
|
idxInfo, ok := mt.indexID2Meta[idx.IndexID]
|
|
|
|
if !ok {
|
2021-04-27 10:30:55 +08:00
|
|
|
return pb.CollectionInfo{}, nil, fmt.Errorf("index id = %d not found", idx.IndexID)
|
2021-04-26 15:53:47 +08:00
|
|
|
}
|
|
|
|
if indexName == "" || idxInfo.IndexName == indexName {
|
|
|
|
rstIndex = append(rstIndex, idxInfo)
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
|
|
|
}
|
2021-04-27 10:30:55 +08:00
|
|
|
return collMeta, rstIndex, nil
|
2021-01-21 10:01:29 +08:00
|
|
|
}
|
2021-01-27 14:41:56 +08:00
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// GetIndexByID return index info by index id
|
|
|
|
func (mt *MetaTable) GetIndexByID(indexID typeutil.UniqueID) (*pb.IndexInfo, error) {
|
2021-01-27 14:41:56 +08:00
|
|
|
mt.ddLock.RLock()
|
2021-02-08 15:19:48 +08:00
|
|
|
defer mt.ddLock.RUnlock()
|
2021-01-27 14:41:56 +08:00
|
|
|
|
|
|
|
indexInfo, ok := mt.indexID2Meta[indexID]
|
|
|
|
if !ok {
|
2021-03-05 10:15:27 +08:00
|
|
|
return nil, fmt.Errorf("cannot find index, id = %d", indexID)
|
2021-01-27 14:41:56 +08:00
|
|
|
}
|
|
|
|
return &indexInfo, nil
|
|
|
|
}
|
2021-07-03 17:54:25 +08:00
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
func (mt *MetaTable) dupMeta() (
|
2021-07-03 17:54:25 +08:00
|
|
|
map[typeutil.UniqueID]pb.CollectionInfo,
|
|
|
|
map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo,
|
|
|
|
map[typeutil.UniqueID]pb.IndexInfo,
|
|
|
|
) {
|
|
|
|
mt.ddLock.RLock()
|
|
|
|
defer mt.ddLock.RUnlock()
|
|
|
|
|
|
|
|
collID2Meta := map[typeutil.UniqueID]pb.CollectionInfo{}
|
|
|
|
segID2IndexMeta := map[typeutil.UniqueID]map[typeutil.UniqueID]pb.SegmentIndexInfo{}
|
|
|
|
indexID2Meta := map[typeutil.UniqueID]pb.IndexInfo{}
|
|
|
|
for k, v := range mt.collID2Meta {
|
|
|
|
collID2Meta[k] = v
|
|
|
|
}
|
|
|
|
for k, v := range mt.segID2IndexMeta {
|
|
|
|
segID2IndexMeta[k] = map[typeutil.UniqueID]pb.SegmentIndexInfo{}
|
|
|
|
for k2, v2 := range v {
|
|
|
|
segID2IndexMeta[k][k2] = v2
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for k, v := range mt.indexID2Meta {
|
|
|
|
indexID2Meta[k] = v
|
|
|
|
}
|
|
|
|
return collID2Meta, segID2IndexMeta, indexID2Meta
|
|
|
|
}
|
2021-09-18 11:13:51 +08:00
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AddAlias add collection alias.
//
// It maps collectionAlias to the ID of collectionName in the alias cache and
// persists the alias record (a minimal CollectionInfo carrying the target ID
// and the alias as its schema name) to the snapshot KV store at ts, together
// with the DD operation string produced by ddOpStr. The alias must not
// already exist and must not collide with an existing collection name.
// Panics if the KV write fails.
func (mt *MetaTable) AddAlias(collectionAlias string, collectionName string,
	ts typeutil.Timestamp, ddOpStr func(ts typeutil.Timestamp) (string, error)) error {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()
	if _, ok := mt.collAlias2ID[collectionAlias]; ok {
		return fmt.Errorf("duplicate collection alias, alias = %s", collectionAlias)
	}

	// An alias may not shadow a real collection name.
	if _, ok := mt.collName2ID[collectionAlias]; ok {
		return fmt.Errorf("collection alias collides with existing collection name. collection = %s, alias = %s", collectionAlias, collectionAlias)
	}

	id, ok := mt.collName2ID[collectionName]
	if !ok {
		return fmt.Errorf("aliased collection name does not exist, name = %s", collectionName)
	}
	mt.collAlias2ID[collectionAlias] = id

	// saveAlias is handed to MultiSave as an extra KV producer; it also
	// records its pair into meta (shared by closure) for the same write.
	meta := make(map[string]string)
	addition := mt.getAdditionKV(ddOpStr, meta)
	saveAlias := func(ts typeutil.Timestamp) (string, string, error) {
		k1 := fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collectionAlias)
		v1, _ := proto.Marshal(&pb.CollectionInfo{ID: id, Schema: &schemapb.CollectionSchema{Name: collectionAlias}})
		meta[k1] = string(v1)
		return k1, string(v1), nil
	}

	err := mt.client.MultiSave(meta, ts, addition, saveAlias)
	if err != nil {
		// The alias cache was already updated; persist failure leaves state
		// inconsistent, hence the panic.
		log.Error("SnapShotKV MultiSave fail", zap.Error(err))
		panic("SnapShotKV MultiSave fail")
	}
	return nil
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// DeleteAlias delete collection alias
|
|
|
|
func (mt *MetaTable) DeleteAlias(collectionAlias string, ts typeutil.Timestamp, ddOpStr func(ts typeutil.Timestamp) (string, error)) error {
|
2021-09-18 11:13:51 +08:00
|
|
|
mt.ddLock.Lock()
|
|
|
|
defer mt.ddLock.Unlock()
|
|
|
|
if _, ok := mt.collAlias2ID[collectionAlias]; !ok {
|
|
|
|
return fmt.Errorf("alias does not exist, alias = %s", collectionAlias)
|
|
|
|
}
|
|
|
|
delete(mt.collAlias2ID, collectionAlias)
|
|
|
|
|
|
|
|
delMetakeys := []string{
|
|
|
|
fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collectionAlias),
|
|
|
|
}
|
|
|
|
meta := make(map[string]string)
|
|
|
|
addition := mt.getAdditionKV(ddOpStr, meta)
|
|
|
|
err := mt.client.MultiSaveAndRemoveWithPrefix(meta, delMetakeys, ts, addition)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("SnapShotKV MultiSave fail", zap.Error(err))
|
|
|
|
panic("SnapShotKV MultiSave fail")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-09-23 15:10:00 +08:00
|
|
|
// AlterAlias alter collection alias.
//
// It repoints an existing alias at the collection named collectionName, both
// in the alias cache and in the snapshot KV store at ts (persisting the
// ddOpStr record in the same transaction). The alias must already exist and
// the target collection must exist. Panics if the KV write fails.
func (mt *MetaTable) AlterAlias(collectionAlias string, collectionName string, ts typeutil.Timestamp, ddOpStr func(ts typeutil.Timestamp) (string, error)) error {
	mt.ddLock.Lock()
	defer mt.ddLock.Unlock()
	if _, ok := mt.collAlias2ID[collectionAlias]; !ok {
		return fmt.Errorf("alias does not exist, alias = %s", collectionAlias)
	}

	id, ok := mt.collName2ID[collectionName]
	if !ok {
		return fmt.Errorf("aliased collection name does not exist, name = %s", collectionName)
	}
	mt.collAlias2ID[collectionAlias] = id

	// alterAlias is handed to MultiSave as an extra KV producer; it also
	// records its pair into meta (shared by closure) for the same write.
	meta := make(map[string]string)
	addition := mt.getAdditionKV(ddOpStr, meta)
	alterAlias := func(ts typeutil.Timestamp) (string, string, error) {
		k1 := fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, collectionAlias)
		v1, _ := proto.Marshal(&pb.CollectionInfo{ID: id, Schema: &schemapb.CollectionSchema{Name: collectionAlias}})
		meta[k1] = string(v1)
		return k1, string(v1), nil
	}

	err := mt.client.MultiSave(meta, ts, addition, alterAlias)
	if err != nil {
		// The alias cache was already updated; persist failure leaves state
		// inconsistent, hence the panic.
		log.Error("SnapShotKV MultiSave fail", zap.Error(err))
		panic("SnapShotKV MultiSave fail")
	}
	return nil
}
|