2023-01-04 19:37:36 +08:00
|
|
|
// Licensed to the LF AI & Data foundation under one
|
|
|
|
// or more contributor license agreements. See the NOTICE file
|
|
|
|
// distributed with this work for additional information
|
|
|
|
// regarding copyright ownership. The ASF licenses this file
|
|
|
|
// to you under the Apache License, Version 2.0 (the
|
|
|
|
// "License"); you may not use this file except in compliance
|
|
|
|
// with the License. You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package datacoord
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"go.uber.org/zap"
|
|
|
|
|
2023-06-09 01:28:37 +08:00
|
|
|
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
2023-01-04 19:37:36 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
2023-01-11 14:35:40 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/proto/indexpb"
|
2023-04-06 19:14:32 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/log"
|
|
|
|
"github.com/milvus-io/milvus/pkg/metrics"
|
2023-05-06 10:34:39 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/merr"
|
2023-04-06 19:14:32 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/metautil"
|
|
|
|
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
2023-06-01 18:14:32 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
2023-01-04 19:37:36 +08:00
|
|
|
)
|
|
|
|
|
2023-05-06 10:34:39 +08:00
|
|
|
// serverID return the session serverID
|
|
|
|
func (s *Server) serverID() int64 {
|
|
|
|
if s.session != nil {
|
|
|
|
return s.session.ServerID
|
|
|
|
}
|
|
|
|
// return 0 if no session exist, only for UT
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2023-01-04 19:37:36 +08:00
|
|
|
func (s *Server) startIndexService(ctx context.Context) {
|
|
|
|
s.indexBuilder.Start()
|
|
|
|
|
|
|
|
s.serverLoopWg.Add(1)
|
|
|
|
go s.createIndexForSegmentLoop(ctx)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) createIndexForSegment(segment *SegmentInfo, indexID UniqueID) error {
|
2023-09-28 18:03:28 +08:00
|
|
|
log.Info("create index for segment", zap.Int64("segmentID", segment.ID), zap.Int64("indexID", indexID))
|
2023-01-04 19:37:36 +08:00
|
|
|
buildID, err := s.allocator.allocID(context.Background())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
segIndex := &model.SegmentIndex{
|
2023-09-28 18:03:28 +08:00
|
|
|
SegmentID: segment.ID,
|
|
|
|
CollectionID: segment.CollectionID,
|
|
|
|
PartitionID: segment.PartitionID,
|
|
|
|
NumRows: segment.NumOfRows,
|
|
|
|
IndexID: indexID,
|
|
|
|
BuildID: buildID,
|
|
|
|
CreateTime: uint64(segment.ID),
|
|
|
|
WriteHandoff: false,
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
if err = s.meta.AddSegmentIndex(segIndex); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
s.indexBuilder.enqueue(buildID)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) createIndexesForSegment(segment *SegmentInfo) error {
|
|
|
|
indexes := s.meta.GetIndexesForCollection(segment.CollectionID, "")
|
|
|
|
for _, index := range indexes {
|
|
|
|
if _, ok := segment.segmentIndexes[index.IndexID]; !ok {
|
|
|
|
if err := s.createIndexForSegment(segment, index.IndexID); err != nil {
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Warn("create index for segment fail", zap.Int64("segmentID", segment.ID),
|
2023-01-04 19:37:36 +08:00
|
|
|
zap.Int64("indexID", index.IndexID))
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *Server) createIndexForSegmentLoop(ctx context.Context) {
|
|
|
|
log.Info("start create index for segment loop...")
|
|
|
|
defer s.serverLoopWg.Done()
|
|
|
|
|
|
|
|
ticker := time.NewTicker(time.Minute)
|
|
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
log.Warn("DataCoord context done, exit...")
|
|
|
|
return
|
|
|
|
case <-ticker.C:
|
|
|
|
segments := s.meta.GetHasUnindexTaskSegments()
|
|
|
|
for _, segment := range segments {
|
|
|
|
if err := s.createIndexesForSegment(segment); err != nil {
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID))
|
2023-01-04 19:37:36 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
2023-07-14 15:56:31 +08:00
|
|
|
case collectionID := <-s.notifyIndexChan:
|
|
|
|
log.Info("receive create index notify", zap.Int64("collectionID", collectionID))
|
2023-01-04 19:37:36 +08:00
|
|
|
segments := s.meta.SelectSegments(func(info *SegmentInfo) bool {
|
2023-07-14 15:56:31 +08:00
|
|
|
return isFlush(info) && collectionID == info.CollectionID
|
2023-01-04 19:37:36 +08:00
|
|
|
})
|
|
|
|
for _, segment := range segments {
|
|
|
|
if err := s.createIndexesForSegment(segment); err != nil {
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID))
|
2023-01-04 19:37:36 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
case segID := <-s.buildIndexCh:
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Info("receive new flushed segment", zap.Int64("segmentID", segID))
|
2023-03-03 14:13:49 +08:00
|
|
|
segment := s.meta.GetSegment(segID)
|
2023-01-04 19:37:36 +08:00
|
|
|
if segment == nil {
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Warn("segment is not exist, no need to build index", zap.Int64("segmentID", segID))
|
2023-01-04 19:37:36 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
if err := s.createIndexesForSegment(segment); err != nil {
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Warn("create index for segment fail, wait for retry", zap.Int64("segmentID", segment.ID))
|
2023-01-04 19:37:36 +08:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateIndex create an index on collection.
// Index building is asynchronous, so when an index building request comes, an IndexID is assigned to the task and
// will get all flushed segments from DataCoord and record tasks with these segments. The background process
// indexBuilder will find this task and assign it to IndexNode for execution.
func (s *Server) CreateIndex(ctx context.Context, req *indexpb.CreateIndexRequest) (*commonpb.Status, error) {
	log := log.Ctx(ctx).With(
		zap.Int64("collectionID", req.GetCollectionID()),
	)
	log.Info("receive CreateIndex request",
		zap.String("IndexName", req.GetIndexName()), zap.Int64("fieldID", req.GetFieldID()),
		zap.Any("TypeParams", req.GetTypeParams()),
		zap.Any("IndexParams", req.GetIndexParams()),
	)

	// Reject requests while the coordinator is not healthy; errors are
	// returned in the Status, the gRPC error is always nil.
	if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
		log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
		return merr.Status(err), nil
	}
	// Count every accepted request; each failure path below also bumps the
	// fail counter, and the single success path bumps the success counter.
	metrics.IndexRequestCounter.WithLabelValues(metrics.TotalLabel).Inc()

	// CanCreateIndex validates the request against existing indexes; a
	// non-zero indexID means an equivalent index already exists and is reused.
	indexID, err := s.meta.CanCreateIndex(req)
	if err != nil {
		metrics.IndexRequestCounter.WithLabelValues(metrics.FailLabel).Inc()
		return merr.Status(err), nil
	}

	if indexID == 0 {
		// Brand-new index: allocate a fresh ID.
		indexID, err = s.allocator.allocID(ctx)
		if err != nil {
			log.Warn("failed to alloc indexID", zap.Error(err))
			metrics.IndexRequestCounter.WithLabelValues(metrics.FailLabel).Inc()
			return merr.Status(err), nil
		}
		// Disk-based (DiskANN) indexes need IndexNode support; refuse early
		// if no connected IndexNode can build them.
		if getIndexType(req.GetIndexParams()) == diskAnnIndex && !s.indexNodeManager.ClientSupportDisk() {
			errMsg := "all IndexNodes do not support disk indexes, please verify"
			log.Warn(errMsg)
			err = merr.WrapErrIndexNotSupported(diskAnnIndex)
			metrics.IndexRequestCounter.WithLabelValues(metrics.FailLabel).Inc()
			return merr.Status(err), nil
		}
	}

	index := &model.Index{
		CollectionID:    req.GetCollectionID(),
		FieldID:         req.GetFieldID(),
		IndexID:         indexID,
		IndexName:       req.GetIndexName(),
		TypeParams:      req.GetTypeParams(),
		IndexParams:     req.GetIndexParams(),
		CreateTime:      req.GetTimestamp(),
		IsAutoIndex:     req.GetIsAutoIndex(),
		UserIndexParams: req.GetUserIndexParams(),
	}

	// Get flushed segments and create index

	// Persist the index definition in meta.
	err = s.meta.CreateIndex(index)
	if err != nil {
		log.Error("CreateIndex fail",
			zap.Int64("fieldID", req.GetFieldID()), zap.String("indexName", req.GetIndexName()), zap.Error(err))
		metrics.IndexRequestCounter.WithLabelValues(metrics.FailLabel).Inc()
		return merr.Status(err), nil
	}

	// Non-blocking notify: wake createIndexForSegmentLoop for this collection.
	// If the channel is full the periodic ticker will pick the work up instead.
	select {
	case s.notifyIndexChan <- req.GetCollectionID():
	default:
	}

	log.Info("CreateIndex successfully",
		zap.String("IndexName", req.GetIndexName()), zap.Int64("fieldID", req.GetFieldID()),
		zap.Int64("IndexID", indexID))
	metrics.IndexRequestCounter.WithLabelValues(metrics.SuccessLabel).Inc()
	return merr.Status(nil), nil
}
|
|
|
|
|
|
|
|
// GetIndexState gets the index state of the index name in the request from Proxy.
|
2023-05-06 10:34:39 +08:00
|
|
|
// Deprecated
|
2023-01-11 14:35:40 +08:00
|
|
|
func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRequest) (*indexpb.GetIndexStateResponse, error) {
|
2023-07-14 15:56:31 +08:00
|
|
|
log := log.Ctx(ctx).With(
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.Int64("collectionID", req.GetCollectionID()),
|
2023-10-08 21:23:32 +08:00
|
|
|
zap.String("indexName", req.GetIndexName()),
|
2023-07-14 15:56:31 +08:00
|
|
|
)
|
2023-10-08 21:23:32 +08:00
|
|
|
log.Info("receive GetIndexState request")
|
2023-01-04 19:37:36 +08:00
|
|
|
|
2023-09-26 17:15:27 +08:00
|
|
|
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
|
|
|
|
log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetIndexStateResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
|
|
|
|
if len(indexes) == 0 {
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexNotFound(req.GetIndexName())
|
2023-10-08 21:23:32 +08:00
|
|
|
log.Warn("GetIndexState fail", zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetIndexStateResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
if len(indexes) > 1 {
|
|
|
|
log.Warn(msgAmbiguousIndexName())
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexDuplicate(req.GetIndexName())
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetIndexStateResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2023-01-11 14:35:40 +08:00
|
|
|
ret := &indexpb.GetIndexStateResponse{
|
2023-09-04 09:57:09 +08:00
|
|
|
Status: merr.Status(nil),
|
|
|
|
State: commonpb.IndexState_Finished,
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
|
2023-10-08 21:23:32 +08:00
|
|
|
indexInfo := &indexpb.IndexInfo{}
|
2023-01-04 19:37:36 +08:00
|
|
|
s.completeIndexInfo(indexInfo, indexes[0], s.meta.SelectSegments(func(info *SegmentInfo) bool {
|
2023-02-28 10:29:50 +08:00
|
|
|
return isFlush(info) && info.CollectionID == req.GetCollectionID()
|
2023-07-25 10:05:00 +08:00
|
|
|
}), false, indexes[0].CreateTime)
|
2023-01-04 19:37:36 +08:00
|
|
|
ret.State = indexInfo.State
|
|
|
|
ret.FailReason = indexInfo.IndexStateFailReason
|
|
|
|
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Info("GetIndexState success",
|
2023-10-08 21:23:32 +08:00
|
|
|
zap.String("state", ret.GetState().String()),
|
|
|
|
)
|
2023-01-04 19:37:36 +08:00
|
|
|
return ret, nil
|
|
|
|
}
|
|
|
|
|
2023-01-11 14:35:40 +08:00
|
|
|
func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
|
2023-07-14 15:56:31 +08:00
|
|
|
log := log.Ctx(ctx).With(
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.Int64("collectionID", req.GetCollectionID()),
|
2023-07-14 15:56:31 +08:00
|
|
|
)
|
|
|
|
log.Info("receive GetSegmentIndexState",
|
2023-09-26 17:15:27 +08:00
|
|
|
zap.String("IndexName", req.GetIndexName()),
|
|
|
|
zap.Int64s("fieldID", req.GetSegmentIDs()),
|
|
|
|
)
|
|
|
|
|
|
|
|
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
|
|
|
|
log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetSegmentIndexStateResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2023-01-11 14:35:40 +08:00
|
|
|
ret := &indexpb.GetSegmentIndexStateResponse{
|
2023-09-04 09:57:09 +08:00
|
|
|
Status: merr.Status(nil),
|
2023-01-11 14:35:40 +08:00
|
|
|
States: make([]*indexpb.SegmentIndexState, 0),
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
indexID2CreateTs := s.meta.GetIndexIDByName(req.GetCollectionID(), req.GetIndexName())
|
|
|
|
if len(indexID2CreateTs) == 0 {
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexNotFound(req.GetIndexName())
|
|
|
|
log.Warn("GetSegmentIndexState fail", zap.String("indexName", req.GetIndexName()), zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetSegmentIndexStateResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2023-09-15 10:07:19 +08:00
|
|
|
for _, segID := range req.GetSegmentIDs() {
|
2023-01-04 19:37:36 +08:00
|
|
|
state := s.meta.GetSegmentIndexState(req.GetCollectionID(), segID)
|
2023-01-11 14:35:40 +08:00
|
|
|
ret.States = append(ret.States, &indexpb.SegmentIndexState{
|
2023-01-04 19:37:36 +08:00
|
|
|
SegmentID: segID,
|
|
|
|
State: state.state,
|
|
|
|
FailReason: state.failReason,
|
|
|
|
})
|
|
|
|
}
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Info("GetSegmentIndexState successfully", zap.String("indexName", req.GetIndexName()))
|
2023-01-04 19:37:36 +08:00
|
|
|
return ret, nil
|
|
|
|
}
|
|
|
|
|
2023-06-01 18:14:32 +08:00
|
|
|
func (s *Server) countIndexedRows(indexInfo *indexpb.IndexInfo, segments []*SegmentInfo) int64 {
|
|
|
|
unIndexed, indexed := typeutil.NewSet[int64](), typeutil.NewSet[int64]()
|
|
|
|
for _, seg := range segments {
|
|
|
|
segIdx, ok := seg.segmentIndexes[indexInfo.IndexID]
|
|
|
|
if !ok {
|
|
|
|
unIndexed.Insert(seg.GetID())
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
switch segIdx.IndexState {
|
|
|
|
case commonpb.IndexState_Finished:
|
|
|
|
indexed.Insert(seg.GetID())
|
|
|
|
default:
|
|
|
|
unIndexed.Insert(seg.GetID())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
retrieveContinue := len(unIndexed) != 0
|
|
|
|
for retrieveContinue {
|
|
|
|
for segID := range unIndexed {
|
|
|
|
unIndexed.Remove(segID)
|
|
|
|
segment := s.meta.GetSegment(segID)
|
|
|
|
if segment == nil || len(segment.CompactionFrom) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, fromID := range segment.CompactionFrom {
|
|
|
|
fromSeg := s.meta.GetSegment(fromID)
|
|
|
|
if fromSeg == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if segIndex, ok := fromSeg.segmentIndexes[indexInfo.IndexID]; ok && segIndex.IndexState == commonpb.IndexState_Finished {
|
|
|
|
indexed.Insert(fromID)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
unIndexed.Insert(fromID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
retrieveContinue = len(unIndexed) != 0
|
|
|
|
}
|
|
|
|
indexedRows := int64(0)
|
|
|
|
for segID := range indexed {
|
|
|
|
segment := s.meta.GetSegment(segID)
|
|
|
|
if segment != nil {
|
|
|
|
indexedRows += segment.GetNumOfRows()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return indexedRows
|
|
|
|
}
|
|
|
|
|
2023-05-06 10:34:39 +08:00
|
|
|
// completeIndexInfo get the index row count and index task state
// if realTime, calculate current statistics
// if not realTime, which means get info of the prior `CreateIndex` action, skip segments created after index's create time
func (s *Server) completeIndexInfo(indexInfo *indexpb.IndexInfo, index *model.Index, segments []*SegmentInfo, realTime bool, ts Timestamp) {
	var (
		// per-state segment counters; the aggregate state below is derived
		// from which counters are non-zero
		cntNone       = 0
		cntUnissued   = 0
		cntInProgress = 0
		cntFinished   = 0
		cntFailed     = 0
		failReason    string
		totalRows     = int64(0)
		indexedRows   = int64(0)
		// rows in segments whose index is not yet finished
		pendingIndexRows = int64(0)
	)

	for _, seg := range segments {
		totalRows += seg.NumOfRows
		segIdx, ok := seg.segmentIndexes[index.IndexID]

		if !ok {
			// Segment has no index task at all. It only counts as "unissued"
			// when it existed at the reference timestamp ts.
			if seg.GetLastExpireTime() <= ts {
				cntUnissued++
			}
			pendingIndexRows += seg.GetNumOfRows()
			continue
		}
		if segIdx.IndexState != commonpb.IndexState_Finished {
			pendingIndexRows += seg.GetNumOfRows()
		}

		// if realTime, calculate current statistics
		// if not realTime, skip segments created after index create
		if !realTime && seg.GetLastExpireTime() > ts {
			continue
		}

		switch segIdx.IndexState {
		case commonpb.IndexState_IndexStateNone:
			// can't to here
			log.Warn("receive unexpected index state: IndexStateNone", zap.Int64("segmentID", segIdx.SegmentID))
			cntNone++
		case commonpb.IndexState_Unissued:
			cntUnissued++
		case commonpb.IndexState_InProgress:
			cntInProgress++
		case commonpb.IndexState_Finished:
			cntFinished++
			indexedRows += seg.NumOfRows
		case commonpb.IndexState_Failed:
			cntFailed++
			// accumulate one "<segID>: <reason>;" entry per failed segment
			failReason += fmt.Sprintf("%d: %s;", segIdx.SegmentID, segIdx.FailReason)
		}
	}

	if realTime {
		indexInfo.IndexedRows = indexedRows
	} else {
		// Historical view: count rows covered via compaction lineage too.
		indexInfo.IndexedRows = s.countIndexedRows(indexInfo, segments)
	}
	indexInfo.TotalRows = totalRows
	indexInfo.PendingIndexRows = pendingIndexRows
	// Priority of the aggregate state: Failed > InProgress/Unissued > None > Finished.
	switch {
	case cntFailed > 0:
		indexInfo.State = commonpb.IndexState_Failed
		indexInfo.IndexStateFailReason = failReason
	case cntInProgress > 0 || cntUnissued > 0:
		indexInfo.State = commonpb.IndexState_InProgress
	case cntNone > 0:
		indexInfo.State = commonpb.IndexState_IndexStateNone
	default:
		indexInfo.State = commonpb.IndexState_Finished
	}

	log.Info("completeIndexInfo success", zap.Int64("collectionID", index.CollectionID), zap.Int64("indexID", index.IndexID),
		zap.Int64("totalRows", indexInfo.TotalRows), zap.Int64("indexRows", indexInfo.IndexedRows),
		zap.Int64("pendingIndexRows", indexInfo.PendingIndexRows),
		zap.String("state", indexInfo.State.String()), zap.String("failReason", indexInfo.IndexStateFailReason))
}
|
|
|
|
|
|
|
|
// GetIndexBuildProgress get the index building progress by num rows.
|
2023-05-06 10:34:39 +08:00
|
|
|
// Deprecated
|
2023-01-11 14:35:40 +08:00
|
|
|
func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetIndexBuildProgressRequest) (*indexpb.GetIndexBuildProgressResponse, error) {
|
2023-07-14 15:56:31 +08:00
|
|
|
log := log.Ctx(ctx).With(
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.Int64("collectionID", req.GetCollectionID()),
|
2023-07-14 15:56:31 +08:00
|
|
|
)
|
|
|
|
log.Info("receive GetIndexBuildProgress request", zap.String("indexName", req.GetIndexName()))
|
2023-09-26 17:15:27 +08:00
|
|
|
|
|
|
|
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
|
|
|
|
log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetIndexBuildProgressResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
|
|
|
|
if len(indexes) == 0 {
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexNotFound(req.GetIndexName())
|
|
|
|
log.Warn("GetIndexBuildProgress fail", zap.String("indexName", req.IndexName), zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetIndexBuildProgressResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2023-01-11 16:59:42 +08:00
|
|
|
|
|
|
|
if len(indexes) > 1 {
|
|
|
|
log.Warn(msgAmbiguousIndexName())
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexDuplicate(req.GetIndexName())
|
2023-01-11 16:59:42 +08:00
|
|
|
return &indexpb.GetIndexBuildProgressResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-11 16:59:42 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2023-01-11 14:35:40 +08:00
|
|
|
indexInfo := &indexpb.IndexInfo{
|
2023-09-15 10:07:19 +08:00
|
|
|
CollectionID: req.GetCollectionID(),
|
2023-06-01 18:14:32 +08:00
|
|
|
IndexID: indexes[0].IndexID,
|
|
|
|
IndexedRows: 0,
|
|
|
|
TotalRows: 0,
|
|
|
|
PendingIndexRows: 0,
|
|
|
|
State: 0,
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
s.completeIndexInfo(indexInfo, indexes[0], s.meta.SelectSegments(func(info *SegmentInfo) bool {
|
2023-02-28 10:29:50 +08:00
|
|
|
return isFlush(info) && info.CollectionID == req.GetCollectionID()
|
2023-07-25 10:05:00 +08:00
|
|
|
}), false, indexes[0].CreateTime)
|
|
|
|
log.Info("GetIndexBuildProgress success", zap.Int64("collectionID", req.GetCollectionID()),
|
|
|
|
zap.String("indexName", req.GetIndexName()))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetIndexBuildProgressResponse{
|
2023-09-04 09:57:09 +08:00
|
|
|
Status: merr.Status(nil),
|
2023-06-01 18:14:32 +08:00
|
|
|
IndexedRows: indexInfo.IndexedRows,
|
|
|
|
TotalRows: indexInfo.TotalRows,
|
|
|
|
PendingIndexRows: indexInfo.PendingIndexRows,
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DescribeIndex describe the index info of the collection.
|
2023-01-11 14:35:40 +08:00
|
|
|
func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRequest) (*indexpb.DescribeIndexResponse, error) {
|
2023-07-14 15:56:31 +08:00
|
|
|
log := log.Ctx(ctx).With(
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.Int64("collectionID", req.GetCollectionID()),
|
2023-10-08 21:23:32 +08:00
|
|
|
zap.String("indexName", req.GetIndexName()),
|
|
|
|
)
|
|
|
|
log.Info("receive DescribeIndex request",
|
|
|
|
zap.Uint64("timestamp", req.GetTimestamp()),
|
2023-07-14 15:56:31 +08:00
|
|
|
)
|
2023-09-26 17:15:27 +08:00
|
|
|
|
|
|
|
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
|
|
|
|
log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.DescribeIndexResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
|
|
|
|
if len(indexes) == 0 {
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexNotFound(req.GetIndexName())
|
2023-10-08 21:23:32 +08:00
|
|
|
log.Warn("DescribeIndex fail", zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.DescribeIndexResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// The total rows of all indexes should be based on the current perspective
|
|
|
|
segments := s.meta.SelectSegments(func(info *SegmentInfo) bool {
|
|
|
|
return isFlush(info) && info.CollectionID == req.GetCollectionID()
|
|
|
|
})
|
2023-01-11 14:35:40 +08:00
|
|
|
indexInfos := make([]*indexpb.IndexInfo, 0)
|
2023-01-04 19:37:36 +08:00
|
|
|
for _, index := range indexes {
|
2023-01-11 14:35:40 +08:00
|
|
|
indexInfo := &indexpb.IndexInfo{
|
2023-01-04 19:37:36 +08:00
|
|
|
CollectionID: index.CollectionID,
|
|
|
|
FieldID: index.FieldID,
|
|
|
|
IndexName: index.IndexName,
|
|
|
|
IndexID: index.IndexID,
|
|
|
|
TypeParams: index.TypeParams,
|
|
|
|
IndexParams: index.IndexParams,
|
|
|
|
IndexedRows: 0,
|
|
|
|
TotalRows: 0,
|
|
|
|
State: 0,
|
|
|
|
IndexStateFailReason: "",
|
|
|
|
IsAutoIndex: index.IsAutoIndex,
|
|
|
|
UserIndexParams: index.UserIndexParams,
|
|
|
|
}
|
2023-07-25 10:05:00 +08:00
|
|
|
createTs := index.CreateTime
|
|
|
|
if req.GetTimestamp() != 0 {
|
|
|
|
createTs = req.GetTimestamp()
|
|
|
|
}
|
|
|
|
s.completeIndexInfo(indexInfo, index, segments, false, createTs)
|
2023-01-04 19:37:36 +08:00
|
|
|
indexInfos = append(indexInfos, indexInfo)
|
|
|
|
}
|
2023-10-08 21:23:32 +08:00
|
|
|
log.Info("DescribeIndex success")
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.DescribeIndexResponse{
|
2023-09-04 09:57:09 +08:00
|
|
|
Status: merr.Status(nil),
|
2023-01-04 19:37:36 +08:00
|
|
|
IndexInfos: indexInfos,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2023-05-06 10:34:39 +08:00
|
|
|
// GetIndexStatistics get the statistics of the index. DescribeIndex doesn't contain statistics.
|
|
|
|
func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexStatisticsRequest) (*indexpb.GetIndexStatisticsResponse, error) {
|
2023-07-14 15:56:31 +08:00
|
|
|
log := log.Ctx(ctx).With(
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.Int64("collectionID", req.GetCollectionID()),
|
2023-07-14 15:56:31 +08:00
|
|
|
)
|
|
|
|
log.Info("receive GetIndexStatistics request", zap.String("indexName", req.GetIndexName()))
|
2023-09-26 17:15:27 +08:00
|
|
|
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
|
|
|
|
log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
|
2023-05-06 10:34:39 +08:00
|
|
|
return &indexpb.GetIndexStatisticsResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-05-06 10:34:39 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
|
|
|
|
if len(indexes) == 0 {
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexNotFound(req.GetIndexName())
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Warn("GetIndexStatistics fail",
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.String("indexName", req.GetIndexName()),
|
2023-09-26 17:15:27 +08:00
|
|
|
zap.Error(err))
|
2023-05-06 10:34:39 +08:00
|
|
|
return &indexpb.GetIndexStatisticsResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-05-06 10:34:39 +08:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// The total rows of all indexes should be based on the current perspective
|
|
|
|
segments := s.meta.SelectSegments(func(info *SegmentInfo) bool {
|
|
|
|
return isFlush(info) && info.CollectionID == req.GetCollectionID()
|
|
|
|
})
|
|
|
|
indexInfos := make([]*indexpb.IndexInfo, 0)
|
|
|
|
for _, index := range indexes {
|
|
|
|
indexInfo := &indexpb.IndexInfo{
|
|
|
|
CollectionID: index.CollectionID,
|
|
|
|
FieldID: index.FieldID,
|
|
|
|
IndexName: index.IndexName,
|
|
|
|
IndexID: index.IndexID,
|
|
|
|
TypeParams: index.TypeParams,
|
|
|
|
IndexParams: index.IndexParams,
|
|
|
|
IndexedRows: 0,
|
|
|
|
TotalRows: 0,
|
|
|
|
State: 0,
|
|
|
|
IndexStateFailReason: "",
|
|
|
|
IsAutoIndex: index.IsAutoIndex,
|
|
|
|
UserIndexParams: index.UserIndexParams,
|
|
|
|
}
|
2023-07-25 10:05:00 +08:00
|
|
|
s.completeIndexInfo(indexInfo, index, segments, true, index.CreateTime)
|
2023-05-06 10:34:39 +08:00
|
|
|
indexInfos = append(indexInfos, indexInfo)
|
|
|
|
}
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Debug("GetIndexStatisticsResponse success",
|
2023-05-06 10:34:39 +08:00
|
|
|
zap.String("indexName", req.GetIndexName()))
|
|
|
|
return &indexpb.GetIndexStatisticsResponse{
|
2023-09-04 09:57:09 +08:00
|
|
|
Status: merr.Status(nil),
|
2023-05-06 10:34:39 +08:00
|
|
|
IndexInfos: indexInfos,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2023-01-04 19:37:36 +08:00
|
|
|
// DropIndex deletes indexes based on IndexName. One IndexName corresponds to the index of an entire column. A column is
|
|
|
|
// divided into many segments, and each segment corresponds to an IndexBuildID. DataCoord uses IndexBuildID to record
|
|
|
|
// index tasks.
|
2023-01-11 14:35:40 +08:00
|
|
|
func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
|
2023-07-14 15:56:31 +08:00
|
|
|
log := log.Ctx(ctx).With(
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.Int64("collectionID", req.GetCollectionID()),
|
2023-07-14 15:56:31 +08:00
|
|
|
)
|
|
|
|
log.Info("receive DropIndex request",
|
2023-01-04 19:37:36 +08:00
|
|
|
zap.Int64s("partitionIDs", req.GetPartitionIDs()), zap.String("indexName", req.GetIndexName()),
|
|
|
|
zap.Bool("drop all indexes", req.GetDropAll()))
|
|
|
|
|
2023-09-26 17:15:27 +08:00
|
|
|
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
|
|
|
|
log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
|
|
|
|
return merr.Status(err), nil
|
|
|
|
}
|
2023-01-04 19:37:36 +08:00
|
|
|
|
|
|
|
indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
|
|
|
|
if len(indexes) == 0 {
|
|
|
|
log.Info(fmt.Sprintf("there is no index on collection: %d with the index name: %s", req.CollectionID, req.IndexName))
|
2023-09-26 17:15:27 +08:00
|
|
|
return merr.Status(nil), nil
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if !req.GetDropAll() && len(indexes) > 1 {
|
|
|
|
log.Warn(msgAmbiguousIndexName())
|
2023-09-26 17:15:27 +08:00
|
|
|
err := merr.WrapErrIndexDuplicate(req.GetIndexName())
|
|
|
|
return merr.Status(err), nil
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
indexIDs := make([]UniqueID, 0)
|
|
|
|
for _, index := range indexes {
|
|
|
|
indexIDs = append(indexIDs, index.IndexID)
|
|
|
|
}
|
2023-09-27 10:33:27 +08:00
|
|
|
// Compatibility logic. To prevent the index on the corresponding segments
|
|
|
|
// from being dropped at the same time when dropping_partition in version 2.1
|
2023-01-04 19:37:36 +08:00
|
|
|
if len(req.GetPartitionIDs()) == 0 {
|
|
|
|
// drop collection index
|
2023-09-15 10:07:19 +08:00
|
|
|
err := s.meta.MarkIndexAsDeleted(req.GetCollectionID(), indexIDs)
|
2023-01-04 19:37:36 +08:00
|
|
|
if err != nil {
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Warn("DropIndex fail", zap.String("indexName", req.IndexName), zap.Error(err))
|
2023-09-26 17:15:27 +08:00
|
|
|
return merr.Status(err), nil
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-15 10:07:19 +08:00
|
|
|
log.Debug("DropIndex success", zap.Int64s("partitionIDs", req.GetPartitionIDs()),
|
|
|
|
zap.String("indexName", req.GetIndexName()), zap.Int64s("indexIDs", indexIDs))
|
2023-09-26 17:15:27 +08:00
|
|
|
return merr.Status(nil), nil
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetIndexInfos gets the index file paths for segment from DataCoord.
|
2023-01-11 14:35:40 +08:00
|
|
|
func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoRequest) (*indexpb.GetIndexInfoResponse, error) {
|
2023-07-14 15:56:31 +08:00
|
|
|
log := log.Ctx(ctx).With(
|
2023-09-15 10:07:19 +08:00
|
|
|
zap.Int64("collectionID", req.GetCollectionID()),
|
2023-07-14 15:56:31 +08:00
|
|
|
)
|
2023-09-26 17:15:27 +08:00
|
|
|
|
|
|
|
if err := merr.CheckHealthy(s.GetStateCode()); err != nil {
|
|
|
|
log.Warn(msgDataCoordIsUnhealthy(paramtable.GetNodeID()), zap.Error(err))
|
2023-01-11 14:35:40 +08:00
|
|
|
return &indexpb.GetIndexInfoResponse{
|
2023-09-26 17:15:27 +08:00
|
|
|
Status: merr.Status(err),
|
2023-01-04 19:37:36 +08:00
|
|
|
}, nil
|
|
|
|
}
|
2023-01-11 14:35:40 +08:00
|
|
|
ret := &indexpb.GetIndexInfoResponse{
|
2023-09-04 09:57:09 +08:00
|
|
|
Status: merr.Status(nil),
|
2023-01-11 14:35:40 +08:00
|
|
|
SegmentInfo: map[int64]*indexpb.SegmentInfo{},
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
|
2023-09-15 10:07:19 +08:00
|
|
|
for _, segID := range req.GetSegmentIDs() {
|
2023-01-04 19:37:36 +08:00
|
|
|
segIdxes := s.meta.GetSegmentIndexes(segID)
|
2023-01-11 14:35:40 +08:00
|
|
|
ret.SegmentInfo[segID] = &indexpb.SegmentInfo{
|
2023-09-15 10:07:19 +08:00
|
|
|
CollectionID: req.GetCollectionID(),
|
2023-01-04 19:37:36 +08:00
|
|
|
SegmentID: segID,
|
|
|
|
EnableIndex: false,
|
2023-01-11 14:35:40 +08:00
|
|
|
IndexInfos: make([]*indexpb.IndexFilePathInfo, 0),
|
2023-01-04 19:37:36 +08:00
|
|
|
}
|
|
|
|
if len(segIdxes) != 0 {
|
|
|
|
ret.SegmentInfo[segID].EnableIndex = true
|
|
|
|
for _, segIdx := range segIdxes {
|
|
|
|
if segIdx.IndexState == commonpb.IndexState_Finished {
|
|
|
|
indexFilePaths := metautil.BuildSegmentIndexFilePaths(s.meta.chunkManager.RootPath(), segIdx.BuildID, segIdx.IndexVersion,
|
|
|
|
segIdx.PartitionID, segIdx.SegmentID, segIdx.IndexFileKeys)
|
2023-06-25 14:38:44 +08:00
|
|
|
indexParams := s.meta.GetIndexParams(segIdx.CollectionID, segIdx.IndexID)
|
|
|
|
indexParams = append(indexParams, s.meta.GetTypeParams(segIdx.CollectionID, segIdx.IndexID)...)
|
2023-01-04 19:37:36 +08:00
|
|
|
ret.SegmentInfo[segID].IndexInfos = append(ret.SegmentInfo[segID].IndexInfos,
|
2023-01-11 14:35:40 +08:00
|
|
|
&indexpb.IndexFilePathInfo{
|
2023-09-25 21:39:27 +08:00
|
|
|
SegmentID: segID,
|
|
|
|
FieldID: s.meta.GetFieldIDByIndexID(segIdx.CollectionID, segIdx.IndexID),
|
|
|
|
IndexID: segIdx.IndexID,
|
|
|
|
BuildID: segIdx.BuildID,
|
|
|
|
IndexName: s.meta.GetIndexNameByID(segIdx.CollectionID, segIdx.IndexID),
|
|
|
|
IndexParams: indexParams,
|
|
|
|
IndexFilePaths: indexFilePaths,
|
|
|
|
SerializedSize: segIdx.IndexSize,
|
|
|
|
IndexVersion: segIdx.IndexVersion,
|
|
|
|
NumRows: segIdx.NumRows,
|
|
|
|
CurrentIndexVersion: segIdx.CurrentIndexVersion,
|
2023-01-04 19:37:36 +08:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-14 15:56:31 +08:00
|
|
|
log.Debug("GetIndexInfos successfully", zap.String("indexName", req.GetIndexName()))
|
2023-01-04 19:37:36 +08:00
|
|
|
|
|
|
|
return ret, nil
|
|
|
|
}
|
2023-05-06 10:34:39 +08:00
|
|
|
|
|
|
|
func (s *Server) UnhealthyStatus() *commonpb.Status {
|
2023-09-07 19:23:15 +08:00
|
|
|
code := s.stateCode.Load().(commonpb.StateCode)
|
|
|
|
return merr.Status(merr.WrapErrServiceNotReady(code.String()))
|
2023-05-06 10:34:39 +08:00
|
|
|
}
|