2023-01-04 19:37:36 +08:00
|
|
|
// Licensed to the LF AI & Data foundation under one
|
|
|
|
// or more contributor license agreements. See the NOTICE file
|
|
|
|
// distributed with this work for additional information
|
|
|
|
// regarding copyright ownership. The ASF licenses this file
|
|
|
|
// to you under the Apache License, Version 2.0 (the
|
|
|
|
// "License"); you may not use this file except in compliance
|
|
|
|
// with the License. You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
// Package datacoord contains core functions in datacoord
|
|
|
|
package datacoord
|
|
|
|
|
|
|
|
import (
|
2023-12-21 18:07:24 +08:00
|
|
|
"context"
|
2023-01-04 19:37:36 +08:00
|
|
|
"fmt"
|
|
|
|
"strconv"
|
2024-03-04 16:56:59 +08:00
|
|
|
"sync"
|
2023-01-04 19:37:36 +08:00
|
|
|
|
|
|
|
"github.com/golang/protobuf/proto"
|
2023-09-21 09:45:27 +08:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2023-01-04 19:37:36 +08:00
|
|
|
"go.uber.org/zap"
|
|
|
|
|
2023-06-09 01:28:37 +08:00
|
|
|
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
2024-03-04 16:56:59 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/metastore"
|
2023-01-04 19:37:36 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
|
|
|
"github.com/milvus-io/milvus/internal/proto/indexpb"
|
2023-04-06 19:14:32 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/common"
|
|
|
|
"github.com/milvus-io/milvus/pkg/log"
|
|
|
|
"github.com/milvus-io/milvus/pkg/metrics"
|
2024-03-04 16:56:59 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/timerecord"
|
2023-01-04 19:37:36 +08:00
|
|
|
)
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// indexMeta is the in-memory cache of index metadata for datacoord, backed by
// the DataCoordCatalog metastore. All maps are guarded by the embedded RWMutex.
type indexMeta struct {
	sync.RWMutex
	// ctx is used for all catalog operations issued by this meta instance.
	ctx     context.Context
	// catalog persists index/segment-index records; the maps below mirror it.
	catalog metastore.DataCoordCatalog

	// collectionIndexes records which indexes are on the collection
	// collID -> indexID -> index
	indexes map[UniqueID]map[UniqueID]*model.Index
	// buildID2Meta records the meta information of the segment
	// buildID -> segmentIndex
	buildID2SegmentIndex map[UniqueID]*model.SegmentIndex

	// segmentID -> indexID -> segmentIndex
	segmentIndexes map[UniqueID]map[UniqueID]*model.SegmentIndex
}
|
|
|
|
|
|
|
|
// newIndexMeta creates an indexMeta backed by the given catalog and warms the
// in-memory caches from the metastore via reloadFromKV. It returns an error
// if the initial load fails, in which case no meta instance is returned.
func newIndexMeta(ctx context.Context, catalog metastore.DataCoordCatalog) (*indexMeta, error) {
	mt := &indexMeta{
		ctx:                  ctx,
		catalog:              catalog,
		indexes:              make(map[UniqueID]map[UniqueID]*model.Index),
		buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
		segmentIndexes:       make(map[UniqueID]map[UniqueID]*model.SegmentIndex),
	}
	err := mt.reloadFromKV()
	if err != nil {
		return nil, err
	}
	return mt, nil
}
|
|
|
|
|
|
|
|
// reloadFromKV loads meta from KV storage: first the per-collection field
// indexes, then the per-segment index records. Each loaded record is inserted
// into the in-memory maps via the update helpers. Called only from
// newIndexMeta, before the instance is shared, so no locking is needed here.
func (m *indexMeta) reloadFromKV() error {
	record := timerecord.NewTimeRecorder("indexMeta-reloadFromKV")
	// load field indexes
	fieldIndexes, err := m.catalog.ListIndexes(m.ctx)
	if err != nil {
		log.Error("indexMeta reloadFromKV load field indexes fail", zap.Error(err))
		return err
	}
	for _, fieldIndex := range fieldIndexes {
		m.updateCollectionIndex(fieldIndex)
	}
	segmentIndexes, err := m.catalog.ListSegmentIndexes(m.ctx)
	if err != nil {
		log.Error("indexMeta reloadFromKV load segment indexes fail", zap.Error(err))
		return err
	}
	for _, segIdx := range segmentIndexes {
		m.updateSegmentIndex(segIdx)
		// Record the index-file count per segment for observability.
		metrics.FlushedSegmentFileNum.WithLabelValues(metrics.IndexFileLabel).Observe(float64(len(segIdx.IndexFileKeys)))
	}
	log.Info("indexMeta reloadFromKV done", zap.Duration("duration", record.ElapseSpan()))
	return nil
}
|
|
|
|
|
|
|
|
func (m *indexMeta) updateCollectionIndex(index *model.Index) {
|
2023-01-04 19:37:36 +08:00
|
|
|
if _, ok := m.indexes[index.CollectionID]; !ok {
|
|
|
|
m.indexes[index.CollectionID] = make(map[UniqueID]*model.Index)
|
|
|
|
}
|
|
|
|
m.indexes[index.CollectionID][index.IndexID] = index
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) updateSegmentIndex(segIdx *model.SegmentIndex) {
|
|
|
|
indexes, ok := m.segmentIndexes[segIdx.SegmentID]
|
|
|
|
if ok {
|
|
|
|
indexes[segIdx.IndexID] = segIdx
|
|
|
|
} else {
|
|
|
|
m.segmentIndexes[segIdx.SegmentID] = make(map[UniqueID]*model.SegmentIndex)
|
|
|
|
m.segmentIndexes[segIdx.SegmentID][segIdx.IndexID] = segIdx
|
|
|
|
}
|
2023-01-04 19:37:36 +08:00
|
|
|
m.buildID2SegmentIndex[segIdx.BuildID] = segIdx
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// alterSegmentIndexes persists the given segment-index records through the
// catalog first, and only on success mirrors them into the in-memory maps —
// keeping memory consistent with the metastore on failure. The caller must
// hold the write lock.
func (m *indexMeta) alterSegmentIndexes(segIdxes []*model.SegmentIndex) error {
	err := m.catalog.AlterSegmentIndexes(m.ctx, segIdxes)
	if err != nil {
		log.Error("failed to alter segments index in meta store", zap.Int("segment indexes num", len(segIdxes)),
			zap.Error(err))
		return err
	}
	for _, segIdx := range segIdxes {
		m.updateSegmentIndex(segIdx)
	}
	return nil
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// updateIndexMeta applies updateFunc to a clone of index, so the cached
// *model.Index is never mutated in place; updateFunc is expected to persist
// and re-insert the clone on success (e.g. via the catalog helpers).
func (m *indexMeta) updateIndexMeta(index *model.Index, updateFunc func(clonedIndex *model.Index) error) error {
	return updateFunc(model.CloneIndex(index))
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// updateSegIndexMeta applies updateFunc to a clone of segIdx, so the cached
// *model.SegmentIndex is never mutated in place; updateFunc is expected to
// persist and re-insert the clone on success (e.g. via alterSegmentIndexes).
func (m *indexMeta) updateSegIndexMeta(segIdx *model.SegmentIndex, updateFunc func(clonedSegIdx *model.SegmentIndex) error) error {
	return updateFunc(model.CloneSegmentIndex(segIdx))
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) updateIndexTasksMetrics() {
|
2023-01-04 19:37:36 +08:00
|
|
|
taskMetrics := make(map[UniqueID]map[commonpb.IndexState]int)
|
|
|
|
for _, segIdx := range m.buildID2SegmentIndex {
|
|
|
|
if segIdx.IsDeleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if _, ok := taskMetrics[segIdx.CollectionID]; !ok {
|
|
|
|
taskMetrics[segIdx.CollectionID] = make(map[commonpb.IndexState]int)
|
|
|
|
taskMetrics[segIdx.CollectionID][commonpb.IndexState_Unissued] = 0
|
|
|
|
taskMetrics[segIdx.CollectionID][commonpb.IndexState_InProgress] = 0
|
|
|
|
taskMetrics[segIdx.CollectionID][commonpb.IndexState_Finished] = 0
|
|
|
|
taskMetrics[segIdx.CollectionID][commonpb.IndexState_Failed] = 0
|
|
|
|
}
|
|
|
|
taskMetrics[segIdx.CollectionID][segIdx.IndexState]++
|
|
|
|
}
|
|
|
|
for collID, m := range taskMetrics {
|
|
|
|
for k, v := range m {
|
|
|
|
switch k {
|
|
|
|
case commonpb.IndexState_Unissued:
|
2023-01-06 14:21:37 +08:00
|
|
|
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.UnissuedIndexTaskLabel).Set(float64(v))
|
2023-01-04 19:37:36 +08:00
|
|
|
case commonpb.IndexState_InProgress:
|
2023-01-06 14:21:37 +08:00
|
|
|
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.InProgressIndexTaskLabel).Set(float64(v))
|
2023-01-04 19:37:36 +08:00
|
|
|
case commonpb.IndexState_Finished:
|
2023-01-06 14:21:37 +08:00
|
|
|
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.FinishedIndexTaskLabel).Set(float64(v))
|
2023-01-04 19:37:36 +08:00
|
|
|
case commonpb.IndexState_Failed:
|
2023-01-06 14:21:37 +08:00
|
|
|
metrics.IndexTaskNum.WithLabelValues(strconv.FormatInt(collID, 10), metrics.FailedIndexTaskLabel).Set(float64(v))
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-11 14:35:40 +08:00
|
|
|
func checkParams(fieldIndex *model.Index, req *indexpb.CreateIndexRequest) bool {
|
2023-01-04 19:37:36 +08:00
|
|
|
if len(fieldIndex.TypeParams) != len(req.TypeParams) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
notEq := false
|
|
|
|
for _, param1 := range fieldIndex.TypeParams {
|
|
|
|
exist := false
|
|
|
|
for _, param2 := range req.TypeParams {
|
|
|
|
if param2.Key == param1.Key && param2.Value == param1.Value {
|
|
|
|
exist = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !exist {
|
|
|
|
notEq = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if notEq {
|
|
|
|
return false
|
|
|
|
}
|
2024-03-06 16:03:00 +08:00
|
|
|
|
|
|
|
userIndexParamsWithoutMmapKey := make([]*commonpb.KeyValuePair, 0)
|
|
|
|
for _, param := range fieldIndex.UserIndexParams {
|
|
|
|
if param.Key == common.MmapEnabledKey {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
userIndexParamsWithoutMmapKey = append(userIndexParamsWithoutMmapKey, param)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(userIndexParamsWithoutMmapKey) != len(req.GetUserIndexParams()) {
|
2023-01-04 19:37:36 +08:00
|
|
|
return false
|
|
|
|
}
|
2024-03-06 16:03:00 +08:00
|
|
|
for _, param1 := range userIndexParamsWithoutMmapKey {
|
2023-01-04 19:37:36 +08:00
|
|
|
exist := false
|
2023-10-18 20:18:24 +08:00
|
|
|
for _, param2 := range req.GetUserIndexParams() {
|
2023-01-04 19:37:36 +08:00
|
|
|
if param2.Key == param1.Key && param2.Value == param1.Value {
|
|
|
|
exist = true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !exist {
|
|
|
|
notEq = true
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return !notEq
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) CanCreateIndex(req *indexpb.CreateIndexRequest) (UniqueID, error) {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
indexes, ok := m.indexes[req.CollectionID]
|
|
|
|
if !ok {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
for _, index := range indexes {
|
|
|
|
if index.IsDeleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if req.IndexName == index.IndexName {
|
|
|
|
if req.FieldID == index.FieldID && checkParams(index, req) {
|
|
|
|
return index.IndexID, nil
|
|
|
|
}
|
|
|
|
errMsg := "at most one distinct index is allowed per field"
|
|
|
|
log.Warn(errMsg,
|
|
|
|
zap.String("source index", fmt.Sprintf("{index_name: %s, field_id: %d, index_params: %v, type_params: %v}", index.IndexName, index.FieldID, index.IndexParams, index.TypeParams)),
|
|
|
|
zap.String("current index", fmt.Sprintf("{index_name: %s, field_id: %d, index_params: %v, type_params: %v}", req.GetIndexName(), req.GetFieldID(), req.GetIndexParams(), req.GetTypeParams())))
|
|
|
|
return 0, fmt.Errorf("CreateIndex failed: %s", errMsg)
|
|
|
|
}
|
|
|
|
if req.FieldID == index.FieldID {
|
|
|
|
// creating multiple indexes on same field is not supported
|
|
|
|
errMsg := "CreateIndex failed: creating multiple indexes on same field is not supported"
|
|
|
|
log.Warn(errMsg)
|
|
|
|
return 0, fmt.Errorf(errMsg)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// HasSameReq determine whether there are same indexing tasks.
// It returns (true, indexID) when a live index with the same field, name and
// parameters already exists on the collection, otherwise (false, 0).
func (m *indexMeta) HasSameReq(req *indexpb.CreateIndexRequest) (bool, UniqueID) {
	m.RLock()
	defer m.RUnlock()

	for _, fieldIndex := range m.indexes[req.CollectionID] {
		if fieldIndex.IsDeleted {
			continue
		}
		if fieldIndex.FieldID != req.FieldID || fieldIndex.IndexName != req.IndexName {
			continue
		}
		if !checkParams(fieldIndex, req) {
			continue
		}
		log.Debug("has same index", zap.Int64("collectionID", req.CollectionID),
			zap.Int64("fieldID", req.FieldID), zap.String("indexName", req.IndexName),
			zap.Int64("indexID", fieldIndex.IndexID))
		return true, fieldIndex.IndexID
	}

	return false, 0
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// CreateIndex persists a new collection index through the catalog and, on
// success, caches it in memory. Persist-then-cache ordering keeps the cache
// consistent with the metastore when the catalog write fails.
func (m *indexMeta) CreateIndex(index *model.Index) error {
	log.Info("meta update: CreateIndex", zap.Int64("collectionID", index.CollectionID),
		zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName))
	m.Lock()
	defer m.Unlock()

	if err := m.catalog.CreateIndex(m.ctx, index); err != nil {
		log.Error("meta update: CreateIndex save meta fail", zap.Int64("collectionID", index.CollectionID),
			zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID),
			zap.String("indexName", index.IndexName), zap.Error(err))
		return err
	}

	m.updateCollectionIndex(index)
	log.Info("meta update: CreateIndex success", zap.Int64("collectionID", index.CollectionID),
		zap.Int64("fieldID", index.FieldID), zap.Int64("indexID", index.IndexID), zap.String("indexName", index.IndexName))
	return nil
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// AlterIndex persists modified index records through the catalog and then
// refreshes the in-memory cache. Unlike most methods it takes the caller's
// ctx rather than m.ctx for the catalog call.
func (m *indexMeta) AlterIndex(ctx context.Context, indexes ...*model.Index) error {
	m.Lock()
	defer m.Unlock()

	err := m.catalog.AlterIndexes(ctx, indexes)
	if err != nil {
		return err
	}

	for _, index := range indexes {
		m.updateCollectionIndex(index)
	}

	return nil
}
|
|
|
|
|
2023-01-04 19:37:36 +08:00
|
|
|
// AddSegmentIndex adds the index meta corresponding the indexBuildID to meta table.
// The task starts in the Unissued state; the record is persisted through the
// catalog before being cached, and the task metrics gauge is refreshed.
func (m *indexMeta) AddSegmentIndex(segIndex *model.SegmentIndex) error {
	m.Lock()
	defer m.Unlock()

	buildID := segIndex.BuildID
	log.Info("meta update: adding segment index", zap.Int64("collectionID", segIndex.CollectionID),
		zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID),
		zap.Int64("buildID", buildID))

	// New tasks always start Unissued regardless of the caller-supplied state.
	segIndex.IndexState = commonpb.IndexState_Unissued
	if err := m.catalog.CreateSegmentIndex(m.ctx, segIndex); err != nil {
		log.Warn("meta update: adding segment index failed",
			zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID),
			zap.Int64("buildID", segIndex.BuildID), zap.Error(err))
		return err
	}
	m.updateSegmentIndex(segIndex)
	log.Info("meta update: adding segment index success", zap.Int64("collectionID", segIndex.CollectionID),
		zap.Int64("segmentID", segIndex.SegmentID), zap.Int64("indexID", segIndex.IndexID),
		zap.Int64("buildID", buildID))
	m.updateIndexTasksMetrics()
	return nil
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetIndexIDByName(collID int64, indexName string) map[int64]uint64 {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
indexID2CreateTs := make(map[int64]uint64)
|
|
|
|
|
|
|
|
fieldIndexes, ok := m.indexes[collID]
|
|
|
|
if !ok {
|
|
|
|
return indexID2CreateTs
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, index := range fieldIndexes {
|
|
|
|
if !index.IsDeleted && (indexName == "" || index.IndexName == indexName) {
|
|
|
|
indexID2CreateTs[index.IndexID] = index.CreateTime
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return indexID2CreateTs
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// GetSegmentIndexState returns the build state of the given index on the
// given segment. Missing collection or index yields IndexStateNone with a
// reason; a known index with no segment record yet yields Unissued.
func (m *indexMeta) GetSegmentIndexState(collID, segmentID UniqueID, indexID UniqueID) *indexpb.SegmentIndexState {
	m.RLock()
	defer m.RUnlock()

	state := &indexpb.SegmentIndexState{
		SegmentID:  segmentID,
		State:      commonpb.IndexState_IndexStateNone,
		FailReason: "",
	}
	fieldIndexes, ok := m.indexes[collID]
	if !ok {
		state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID)
		return state
	}

	indexes, ok := m.segmentIndexes[segmentID]
	if !ok {
		// No build record for this segment at all: index is simply not built yet.
		state.State = commonpb.IndexState_Unissued
		state.FailReason = fmt.Sprintf("segment index not exist with ID: %d", segmentID)
		return state
	}

	if index, ok := fieldIndexes[indexID]; ok && !index.IsDeleted {
		if segIdx, ok := indexes[indexID]; ok {
			state.IndexName = index.IndexName
			state.State = segIdx.IndexState
			state.FailReason = segIdx.FailReason
			return state
		}
		// Index is defined but this segment has no build record for it yet.
		state.State = commonpb.IndexState_Unissued
		return state
	}

	state.FailReason = fmt.Sprintf("there is no index on indexID: %d", indexID)
	return state
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// GetSegmentIndexStateOnField returns the build state of the (single) live
// index defined on the given field for the given segment. Missing
// collection or field index yields IndexStateNone with a reason; a known
// index with no segment record yet yields Unissued.
func (m *indexMeta) GetSegmentIndexStateOnField(collID, segmentID, fieldID UniqueID) *indexpb.SegmentIndexState {
	m.RLock()
	defer m.RUnlock()

	state := &indexpb.SegmentIndexState{
		SegmentID:  segmentID,
		State:      commonpb.IndexState_IndexStateNone,
		FailReason: "",
	}
	fieldIndexes, ok := m.indexes[collID]
	if !ok {
		state.FailReason = fmt.Sprintf("collection not exist with ID: %d", collID)
		return state
	}

	indexes, ok := m.segmentIndexes[segmentID]
	if !ok {
		state.FailReason = fmt.Sprintf("segment index not exist with ID: %d", segmentID)
		state.State = commonpb.IndexState_Unissued
		return state
	}

	// Scan for the first live index on the field (at most one is expected;
	// see CanCreateIndex).
	for indexID, index := range fieldIndexes {
		if index.FieldID == fieldID && !index.IsDeleted {
			if segIdx, ok := indexes[indexID]; ok {
				state.IndexName = index.IndexName
				state.State = segIdx.IndexState
				state.FailReason = segIdx.FailReason
				return state
			}
			// Index is defined but this segment has no build record for it yet.
			state.State = commonpb.IndexState_Unissued
			return state
		}
	}
	state.FailReason = fmt.Sprintf("there is no index on fieldID: %d", fieldID)
	return state
}
|
|
|
|
|
|
|
|
// GetIndexesForCollection gets all indexes info with the specified collection.
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetIndexesForCollection(collID UniqueID, indexName string) []*model.Index {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
indexInfos := make([]*model.Index, 0)
|
|
|
|
for _, index := range m.indexes[collID] {
|
|
|
|
if index.IsDeleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if indexName == "" || indexName == index.IndexName {
|
|
|
|
indexInfos = append(indexInfos, model.CloneIndex(index))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return indexInfos
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetFieldIndexes(collID, fieldID UniqueID, indexName string) []*model.Index {
|
2023-12-20 17:22:41 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
indexInfos := make([]*model.Index, 0)
|
|
|
|
for _, index := range m.indexes[collID] {
|
|
|
|
if index.IsDeleted || index.FieldID != fieldID {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if indexName == "" || indexName == index.IndexName {
|
|
|
|
indexInfos = append(indexInfos, model.CloneIndex(index))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return indexInfos
|
|
|
|
}
|
|
|
|
|
2023-01-04 19:37:36 +08:00
|
|
|
// MarkIndexAsDeleted will mark the corresponding index as deleted, and recycleUnusedIndexFiles will recycle these tasks.
// The deletion flag is set on clones, persisted via the catalog, and only
// then written back into the cache, so a failed persist leaves the cache
// untouched. Already-deleted or unknown indexIDs are silently skipped.
func (m *indexMeta) MarkIndexAsDeleted(collID UniqueID, indexIDs []UniqueID) error {
	log.Info("IndexCoord metaTable MarkIndexAsDeleted", zap.Int64("collectionID", collID),
		zap.Int64s("indexIDs", indexIDs))

	m.Lock()
	defer m.Unlock()

	fieldIndexes, ok := m.indexes[collID]
	if !ok {
		return nil
	}
	indexes := make([]*model.Index, 0)
	for _, indexID := range indexIDs {
		index, ok := fieldIndexes[indexID]
		if !ok || index.IsDeleted {
			continue
		}
		// Clone before flagging so the cached record stays clean if the
		// catalog write below fails.
		clonedIndex := model.CloneIndex(index)
		clonedIndex.IsDeleted = true
		indexes = append(indexes, clonedIndex)
	}
	if len(indexes) == 0 {
		return nil
	}
	err := m.catalog.AlterIndexes(m.ctx, indexes)
	if err != nil {
		log.Error("failed to alter index meta in meta store", zap.Int("indexes num", len(indexes)), zap.Error(err))
		return err
	}
	for _, index := range indexes {
		m.indexes[index.CollectionID][index.IndexID] = index
	}

	log.Info("IndexCoord metaTable MarkIndexAsDeleted success", zap.Int64("collectionID", collID), zap.Int64s("indexIDs", indexIDs))
	return nil
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) IsUnIndexedSegment(collectionID UniqueID, segID UniqueID) bool {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
fieldIndexes, ok := m.indexes[collectionID]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// the segment should be unindexed status if the fieldIndexes is not nil
|
|
|
|
segIndexInfos, ok := m.segmentIndexes[segID]
|
|
|
|
if !ok || len(segIndexInfos) == 0 {
|
|
|
|
return true
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
2024-03-04 16:56:59 +08:00
|
|
|
|
|
|
|
for _, index := range fieldIndexes {
|
|
|
|
if _, ok := segIndexInfos[index.IndexID]; !index.IsDeleted {
|
|
|
|
if !ok {
|
|
|
|
// the segment should be unindexed status if the segment index is not found within field indexes
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *indexMeta) getSegmentIndexes(segID UniqueID) map[UniqueID]*model.SegmentIndex {
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
ret := make(map[UniqueID]*model.SegmentIndex, 0)
|
|
|
|
segIndexInfos, ok := m.segmentIndexes[segID]
|
|
|
|
if !ok || len(segIndexInfos) == 0 {
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, segIdx := range segIndexInfos {
|
|
|
|
ret[segIdx.IndexID] = model.CloneSegmentIndex(segIdx)
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *indexMeta) GetSegmentIndexes(collectionID UniqueID, segID UniqueID) map[UniqueID]*model.SegmentIndex {
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
ret := make(map[UniqueID]*model.SegmentIndex, 0)
|
|
|
|
segIndexInfos, ok := m.segmentIndexes[segID]
|
|
|
|
if !ok || len(segIndexInfos) == 0 {
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
|
|
|
fieldIndexes, ok := m.indexes[collectionID]
|
2023-01-04 19:37:36 +08:00
|
|
|
if !ok {
|
2024-03-04 16:56:59 +08:00
|
|
|
return ret
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
for _, segIdx := range segIndexInfos {
|
|
|
|
if index, ok := fieldIndexes[segIdx.IndexID]; ok && !index.IsDeleted {
|
|
|
|
ret[segIdx.IndexID] = model.CloneSegmentIndex(segIdx)
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
}
|
2024-03-04 16:56:59 +08:00
|
|
|
return ret
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetFieldIDByIndexID(collID, indexID UniqueID) UniqueID {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
if fieldIndexes, ok := m.indexes[collID]; ok {
|
|
|
|
if index, ok := fieldIndexes[indexID]; ok {
|
|
|
|
return index.FieldID
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetIndexNameByID(collID, indexID UniqueID) string {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
if fieldIndexes, ok := m.indexes[collID]; ok {
|
|
|
|
if index, ok := fieldIndexes[indexID]; ok {
|
|
|
|
return index.IndexName
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetIndexParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
fieldIndexes, ok := m.indexes[collID]
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
index, ok := fieldIndexes[indexID]
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
indexParams := make([]*commonpb.KeyValuePair, 0, len(index.IndexParams))
|
|
|
|
|
|
|
|
for _, param := range index.IndexParams {
|
|
|
|
indexParams = append(indexParams, proto.Clone(param).(*commonpb.KeyValuePair))
|
|
|
|
}
|
|
|
|
|
|
|
|
return indexParams
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetTypeParams(collID, indexID UniqueID) []*commonpb.KeyValuePair {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
fieldIndexes, ok := m.indexes[collID]
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
index, ok := fieldIndexes[indexID]
|
|
|
|
if !ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
typeParams := make([]*commonpb.KeyValuePair, 0, len(index.TypeParams))
|
|
|
|
|
|
|
|
for _, param := range index.TypeParams {
|
|
|
|
typeParams = append(typeParams, proto.Clone(param).(*commonpb.KeyValuePair))
|
|
|
|
}
|
|
|
|
|
|
|
|
return typeParams
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetIndexJob(buildID UniqueID) (*model.SegmentIndex, bool) {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
segIdx, ok := m.buildID2SegmentIndex[buildID]
|
|
|
|
if ok {
|
|
|
|
return model.CloneSegmentIndex(segIdx), true
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, false
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) IsIndexExist(collID, indexID UniqueID) bool {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
fieldIndexes, ok := m.indexes[collID]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
if index, ok := fieldIndexes[indexID]; !ok || index.IsDeleted {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// UpdateVersion updates the version and nodeID of the index meta, whenever the task is built once, the version will be updated once.
// The change is applied to a clone and persisted through alterSegmentIndexes,
// which re-inserts the clone into the cache on success.
func (m *indexMeta) UpdateVersion(buildID UniqueID, nodeID UniqueID) error {
	m.Lock()
	defer m.Unlock()

	log.Debug("IndexCoord metaTable UpdateVersion receive", zap.Int64("buildID", buildID), zap.Int64("nodeID", nodeID))
	segIdx, ok := m.buildID2SegmentIndex[buildID]
	if !ok {
		return fmt.Errorf("there is no index with buildID: %d", buildID)
	}

	// updateFunc receives a clone; persisting it also refreshes the cache.
	updateFunc := func(segIdx *model.SegmentIndex) error {
		segIdx.NodeID = nodeID
		segIdx.IndexVersion++
		return m.alterSegmentIndexes([]*model.SegmentIndex{segIdx})
	}

	return m.updateSegIndexMeta(segIdx, updateFunc)
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// FinishTask records the terminal result of an index build task (state,
// produced index files, failure reason, serialized size, index version) and
// refreshes the task metrics. An unknown buildID is logged and ignored.
func (m *indexMeta) FinishTask(taskInfo *indexpb.IndexTaskInfo) error {
	m.Lock()
	defer m.Unlock()

	segIdx, ok := m.buildID2SegmentIndex[taskInfo.GetBuildID()]
	if !ok {
		log.Warn("there is no index with buildID", zap.Int64("buildID", taskInfo.GetBuildID()))
		return nil
	}
	// updateFunc receives a clone; persisting it also refreshes the cache.
	updateFunc := func(segIdx *model.SegmentIndex) error {
		segIdx.IndexState = taskInfo.GetState()
		segIdx.IndexFileKeys = common.CloneStringList(taskInfo.GetIndexFileKeys())
		segIdx.FailReason = taskInfo.GetFailReason()
		segIdx.IndexSize = taskInfo.GetSerializedSize()
		segIdx.CurrentIndexVersion = taskInfo.GetCurrentIndexVersion()
		return m.alterSegmentIndexes([]*model.SegmentIndex{segIdx})
	}

	if err := m.updateSegIndexMeta(segIdx, updateFunc); err != nil {
		return err
	}

	log.Info("finish index task success", zap.Int64("buildID", taskInfo.GetBuildID()),
		zap.String("state", taskInfo.GetState().String()), zap.String("fail reason", taskInfo.GetFailReason()),
		zap.Int32("current_index_version", taskInfo.GetCurrentIndexVersion()),
	)
	m.updateIndexTasksMetrics()
	// Record the index-file count for observability.
	metrics.FlushedSegmentFileNum.WithLabelValues(metrics.IndexFileLabel).Observe(float64(len(taskInfo.GetIndexFileKeys())))
	return nil
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
// DeleteTask soft-deletes an index build task by flagging IsDeleted on a
// clone and persisting it; garbage collection recycles flagged tasks later.
// An unknown buildID is logged and ignored.
func (m *indexMeta) DeleteTask(buildID int64) error {
	m.Lock()
	defer m.Unlock()

	segIdx, ok := m.buildID2SegmentIndex[buildID]
	if !ok {
		log.Warn("there is no index with buildID", zap.Int64("buildID", buildID))
		return nil
	}

	// updateFunc receives a clone; persisting it also refreshes the cache.
	updateFunc := func(segIdx *model.SegmentIndex) error {
		segIdx.IsDeleted = true
		return m.alterSegmentIndexes([]*model.SegmentIndex{segIdx})
	}

	if err := m.updateSegIndexMeta(segIdx, updateFunc); err != nil {
		return err
	}

	log.Info("delete index task success", zap.Int64("buildID", buildID))
	m.updateIndexTasksMetrics()
	return nil
}
|
|
|
|
|
2023-01-04 19:37:36 +08:00
|
|
|
// BuildIndex set the index state to be InProgress. It means IndexNode is building the index.
// The transition is applied to a clone, persisted, and mirrored into the
// cache; metrics are refreshed on success.
func (m *indexMeta) BuildIndex(buildID UniqueID) error {
	m.Lock()
	defer m.Unlock()

	segIdx, ok := m.buildID2SegmentIndex[buildID]
	if !ok {
		return fmt.Errorf("there is no index with buildID: %d", buildID)
	}

	// updateFunc receives a clone; persisting it also refreshes the cache.
	updateFunc := func(segIdx *model.SegmentIndex) error {
		segIdx.IndexState = commonpb.IndexState_InProgress

		err := m.alterSegmentIndexes([]*model.SegmentIndex{segIdx})
		if err != nil {
			log.Error("meta Update: segment index in progress fail", zap.Int64("buildID", segIdx.BuildID), zap.Error(err))
			return err
		}
		return nil
	}
	if err := m.updateSegIndexMeta(segIdx, updateFunc); err != nil {
		return err
	}
	log.Info("meta update: segment index in progress success", zap.Int64("buildID", segIdx.BuildID),
		zap.Int64("segmentID", segIdx.SegmentID))

	m.updateIndexTasksMetrics()
	return nil
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetAllSegIndexes() map[int64]*model.SegmentIndex {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
segIndexes := make(map[int64]*model.SegmentIndex, len(m.buildID2SegmentIndex))
|
|
|
|
for buildID, segIndex := range m.buildID2SegmentIndex {
|
|
|
|
segIndexes[buildID] = model.CloneSegmentIndex(segIndex)
|
|
|
|
}
|
|
|
|
return segIndexes
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) RemoveSegmentIndex(collID, partID, segID, indexID, buildID UniqueID) error {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
|
|
|
|
err := m.catalog.DropSegmentIndex(m.ctx, collID, partID, segID, buildID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
if _, ok := m.segmentIndexes[segID]; ok {
|
|
|
|
delete(m.segmentIndexes[segID], indexID)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(m.segmentIndexes[segID]) == 0 {
|
|
|
|
delete(m.segmentIndexes, segID)
|
|
|
|
}
|
|
|
|
|
2023-01-04 19:37:36 +08:00
|
|
|
delete(m.buildID2SegmentIndex, buildID)
|
|
|
|
m.updateIndexTasksMetrics()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetDeletedIndexes() []*model.Index {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
deletedIndexes := make([]*model.Index, 0)
|
|
|
|
for _, fieldIndexes := range m.indexes {
|
|
|
|
for _, index := range fieldIndexes {
|
|
|
|
if index.IsDeleted {
|
|
|
|
deletedIndexes = append(deletedIndexes, index)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return deletedIndexes
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) RemoveIndex(collID, indexID UniqueID) error {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Info("IndexCoord meta table remove index", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID))
|
2023-01-04 19:37:36 +08:00
|
|
|
err := m.catalog.DropIndex(m.ctx, collID, indexID)
|
|
|
|
if err != nil {
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Info("IndexCoord meta table remove index fail", zap.Int64("collectionID", collID),
|
2023-01-04 19:37:36 +08:00
|
|
|
zap.Int64("indexID", indexID), zap.Error(err))
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
delete(m.indexes[collID], indexID)
|
|
|
|
if len(m.indexes[collID]) == 0 {
|
|
|
|
delete(m.indexes, collID)
|
2023-01-06 14:21:37 +08:00
|
|
|
metrics.IndexTaskNum.Delete(prometheus.Labels{"collection_id": strconv.FormatInt(collID, 10), "index_task_status": metrics.UnissuedIndexTaskLabel})
|
|
|
|
metrics.IndexTaskNum.Delete(prometheus.Labels{"collection_id": strconv.FormatInt(collID, 10), "index_task_status": metrics.InProgressIndexTaskLabel})
|
|
|
|
metrics.IndexTaskNum.Delete(prometheus.Labels{"collection_id": strconv.FormatInt(collID, 10), "index_task_status": metrics.FinishedIndexTaskLabel})
|
|
|
|
metrics.IndexTaskNum.Delete(prometheus.Labels{"collection_id": strconv.FormatInt(collID, 10), "index_task_status": metrics.FailedIndexTaskLabel})
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Info("IndexCoord meta table remove index success", zap.Int64("collectionID", collID), zap.Int64("indexID", indexID))
|
2023-01-04 19:37:36 +08:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) CleanSegmentIndex(buildID UniqueID) (bool, *model.SegmentIndex) {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
if segIndex, ok := m.buildID2SegmentIndex[buildID]; ok {
|
|
|
|
if segIndex.IndexState == commonpb.IndexState_Finished {
|
|
|
|
return true, model.CloneSegmentIndex(segIndex)
|
|
|
|
}
|
|
|
|
return false, model.CloneSegmentIndex(segIndex)
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
2024-03-04 16:56:59 +08:00
|
|
|
func (m *indexMeta) GetMetasByNodeID(nodeID UniqueID) []*model.SegmentIndex {
|
2023-01-04 19:37:36 +08:00
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
metas := make([]*model.SegmentIndex, 0)
|
|
|
|
for _, segIndex := range m.buildID2SegmentIndex {
|
|
|
|
if segIndex.IsDeleted {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if nodeID == segIndex.NodeID {
|
|
|
|
metas = append(metas, model.CloneSegmentIndex(segIndex))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return metas
|
|
|
|
}
|
2024-03-15 16:37:09 +08:00
|
|
|
|
|
|
|
func (m *indexMeta) getSegmentsIndexStates(collectionID UniqueID, segmentIDs []UniqueID) map[int64]map[int64]*indexpb.SegmentIndexState {
|
|
|
|
m.RLock()
|
|
|
|
defer m.RUnlock()
|
|
|
|
|
|
|
|
ret := make(map[int64]map[int64]*indexpb.SegmentIndexState, 0)
|
|
|
|
fieldIndexes, ok := m.indexes[collectionID]
|
|
|
|
if !ok {
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, segID := range segmentIDs {
|
|
|
|
ret[segID] = make(map[int64]*indexpb.SegmentIndexState)
|
|
|
|
segIndexInfos, ok := m.segmentIndexes[segID]
|
|
|
|
if !ok || len(segIndexInfos) == 0 {
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, segIdx := range segIndexInfos {
|
|
|
|
if index, ok := fieldIndexes[segIdx.IndexID]; ok && !index.IsDeleted {
|
|
|
|
ret[segID][segIdx.IndexID] = &indexpb.SegmentIndexState{
|
|
|
|
SegmentID: segID,
|
|
|
|
State: segIdx.IndexState,
|
|
|
|
FailReason: segIdx.FailReason,
|
|
|
|
IndexName: index.IndexName,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret
|
|
|
|
}
|