// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

package querynode

import (
    "context"
    "errors"
    "fmt"
    "math/rand"
    "strconv"
    "strings"

    "go.uber.org/zap"

    "github.com/milvus-io/milvus/internal/common"
    "github.com/milvus-io/milvus/internal/log"
    "github.com/milvus-io/milvus/internal/proto/commonpb"
    "github.com/milvus-io/milvus/internal/proto/internalpb"
    "github.com/milvus-io/milvus/internal/proto/milvuspb"
    queryPb "github.com/milvus-io/milvus/internal/proto/querypb"
    "github.com/milvus-io/milvus/internal/util/metricsinfo"
    "github.com/milvus-io/milvus/internal/util/mqclient"
    "github.com/milvus-io/milvus/internal/util/typeutil"
)

// GetComponentStates returns information about whether the node is healthy
func (node *QueryNode) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
    stats := &internalpb.ComponentStates{
        Status: &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
        },
    }
    code, ok := node.stateCode.Load().(internalpb.StateCode)
    if !ok {
        errMsg := "unexpected error in type assertion"
        stats.Status = &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    errMsg,
        }
        return stats, errors.New(errMsg)
    }
    nodeID := common.NotRegisteredID
    if node.session != nil && node.session.Registered() {
        nodeID = node.session.ServerID
    }
    info := &internalpb.ComponentInfo{
        NodeID:    nodeID,
        Role:      typeutil.QueryNodeRole,
        StateCode: code,
    }
    stats.State = info
    return stats, nil
}

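// Caller-side sketch (illustrative only, not part of the production call path):
// a client waiting for the node to come up could poll GetComponentStates and
// treat anything other than Healthy as "not ready yet".
//
//    states, err := node.GetComponentStates(ctx)
//    if err != nil || states.State.StateCode != internalpb.StateCode_Healthy {
//        // back off and retry, or mark the node as unavailable
//    }
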
// GetTimeTickChannel returns the time tick channel
// TimeTickChannel contains many time tick messages, which will be sent by query nodes
func (node *QueryNode) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
    return &milvuspb.StringResponse{
        Status: &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
            Reason:    "",
        },
        Value: Params.QueryTimeTickChannelName,
    }, nil
}

// GetStatisticsChannel returns the statistics channel
// Statistics channel contains statistics info of query nodes, such as segment info and memory info
func (node *QueryNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
    return &milvuspb.StringResponse{
        Status: &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
            Reason:    "",
        },
        Value: Params.StatsChannelName,
    }, nil
}

// AddQueryChannel watches the queryChannel of the collection to receive query messages
func (node *QueryNode) AddQueryChannel(ctx context.Context, in *queryPb.AddQueryChannelRequest) (*commonpb.Status, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    collectionID := in.CollectionID
    if node.queryService == nil {
        errMsg := "null query service, collectionID = " + fmt.Sprintln(collectionID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    errMsg,
        }
        return status, errors.New(errMsg)
    }

    if node.queryService.hasQueryCollection(collectionID) {
        log.Debug("queryCollection already exists when addQueryChannel",
            zap.Any("collectionID", collectionID),
        )
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
        }
        return status, nil
    }

    // add search collection
    err := node.queryService.addQueryCollection(collectionID)
    if err != nil {
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    log.Debug("add query collection", zap.Any("collectionID", collectionID))

    // add request channel
    sc, err := node.queryService.getQueryCollection(in.CollectionID)
    if err != nil {
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    consumeChannels := []string{in.RequestChannelID}
    consumeSubName := Params.MsgChannelSubName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())

    if Params.skipQueryChannelRecovery {
        log.Debug("Skip query channel seek back ", zap.Strings("channels", consumeChannels),
            zap.String("seek position", string(in.SeekPosition.MsgID)),
            zap.Uint64("ts", in.SeekPosition.Timestamp))
        sc.queryMsgStream.AsConsumerWithPosition(consumeChannels, consumeSubName, mqclient.SubscriptionPositionLatest)
    } else {
        sc.queryMsgStream.AsConsumer(consumeChannels, consumeSubName)
        if in.SeekPosition == nil || len(in.SeekPosition.MsgID) == 0 {
            // as consumer
            log.Debug("querynode AsConsumer: " + strings.Join(consumeChannels, ", ") + " : " + consumeSubName)
        } else {
            // seek query channel
            err = sc.queryMsgStream.Seek([]*internalpb.MsgPosition{in.SeekPosition})
            if err != nil {
                status := &commonpb.Status{
                    ErrorCode: commonpb.ErrorCode_UnexpectedError,
                    Reason:    err.Error(),
                }
                return status, err
            }
            log.Debug("querynode seek query channel: ", zap.Any("consumeChannels", consumeChannels),
                zap.String("seek position", string(in.SeekPosition.MsgID)))
        }
    }

    // add result channel
    producerChannels := []string{in.ResultChannelID}
    sc.queryResultMsgStream.AsProducer(producerChannels)
    log.Debug("querynode AsProducer: " + strings.Join(producerChannels, ", "))

    // init global sealed segments
    for _, segment := range in.GlobalSealedSegments {
        sc.globalSegmentManager.addGlobalSegmentInfo(segment)
    }

    // start queryCollection; the message stream needs to be set as a consumer before start
    sc.start()
    log.Debug("start query collection", zap.Any("collectionID", collectionID))

    status := &commonpb.Status{
        ErrorCode: commonpb.ErrorCode_Success,
    }
    return status, nil
}

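// Note on the subscription built above: consumeSubName is
// Params.MsgChannelSubName + "-" + <collectionID> + "-" + <random int>, e.g. a
// hypothetical "queryNode-sub-42-1234567890", so every AddQueryChannel call
// gets a fresh subscription on the request channel. When
// Params.skipQueryChannelRecovery is set, the stream subscribes at the latest
// position and the request's SeekPosition is only logged; otherwise the stream
// seeks back to that position before queryCollection starts consuming.
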
// RemoveQueryChannel removes the queryChannel of the collection to stop receiving query messages
func (node *QueryNode) RemoveQueryChannel(ctx context.Context, in *queryPb.RemoveQueryChannelRequest) (*commonpb.Status, error) {
    // if node.searchService == nil || node.searchService.searchMsgStream == nil {
    //     errMsg := "null search service or null search result message stream"
    //     status := &commonpb.Status{
    //         ErrorCode: commonpb.ErrorCode_UnexpectedError,
    //         Reason:    errMsg,
    //     }

    //     return status, errors.New(errMsg)
    // }

    // searchStream, ok := node.searchService.searchMsgStream.(*pulsarms.PulsarMsgStream)
    // if !ok {
    //     errMsg := "type assertion failed for search message stream"
    //     status := &commonpb.Status{
    //         ErrorCode: commonpb.ErrorCode_UnexpectedError,
    //         Reason:    errMsg,
    //     }

    //     return status, errors.New(errMsg)
    // }

    // resultStream, ok := node.searchService.searchResultMsgStream.(*pulsarms.PulsarMsgStream)
    // if !ok {
    //     errMsg := "type assertion failed for search result message stream"
    //     status := &commonpb.Status{
    //         ErrorCode: commonpb.ErrorCode_UnexpectedError,
    //         Reason:    errMsg,
    //     }

    //     return status, errors.New(errMsg)
    // }

    // // remove request channel
    // consumeChannels := []string{in.RequestChannelID}
    // consumeSubName := Params.MsgChannelSubName
    // // TODO: searchStream.RemovePulsarConsumers(producerChannels)
    // searchStream.AsConsumer(consumeChannels, consumeSubName)

    // // remove result channel
    // producerChannels := []string{in.ResultChannelID}
    // // TODO: resultStream.RemovePulsarProducer(producerChannels)
    // resultStream.AsProducer(producerChannels)

    status := &commonpb.Status{
        ErrorCode: commonpb.ErrorCode_Success,
    }
    return status, nil
}

// WatchDmChannels creates consumers on dmChannels to receive incremental data, which is an important part of real-time query
func (node *QueryNode) WatchDmChannels(ctx context.Context, in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    dct := &watchDmChannelsTask{
        baseTask: baseTask{
            ctx:  ctx,
            done: make(chan error),
        },
        req:  in,
        node: node,
    }

    err := node.scheduler.queue.Enqueue(dct)
    if err != nil {
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        log.Warn(err.Error())
        return status, err
    }
    log.Debug("watchDmChannelsTask Enqueue done", zap.Any("collectionID", in.CollectionID))

    waitFunc := func() (*commonpb.Status, error) {
        err = dct.WaitToFinish()
        if err != nil {
            status := &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    err.Error(),
            }
            log.Warn(err.Error())
            return status, err
        }
        log.Debug("watchDmChannelsTask WaitToFinish done", zap.Any("collectionID", in.CollectionID))
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
        }, nil
    }

    return waitFunc()
}

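// The pattern above (build a task, Enqueue it on node.scheduler.queue, then
// block in WaitToFinish) makes WatchDmChannels synchronous from the caller's
// point of view: the RPC returns Success only after the watch task has
// finished, and any scheduling or execution error is surfaced in the returned
// status. WatchDeltaChannels and LoadSegments below follow the same pattern.
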
// WatchDeltaChannels creates consumers on deltaChannels to receive incremental data, which is an important part of real-time query
func (node *QueryNode) WatchDeltaChannels(ctx context.Context, in *queryPb.WatchDeltaChannelsRequest) (*commonpb.Status, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    dct := &watchDeltaChannelsTask{
        baseTask: baseTask{
            ctx:  ctx,
            done: make(chan error),
        },
        req:  in,
        node: node,
    }

    err := node.scheduler.queue.Enqueue(dct)
    if err != nil {
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        log.Warn(err.Error())
        return status, err
    }
    log.Debug("watchDeltaChannelsTask Enqueue done", zap.Any("collectionID", in.CollectionID))

    waitFunc := func() (*commonpb.Status, error) {
        err = dct.WaitToFinish()
        if err != nil {
            status := &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    err.Error(),
            }
            log.Warn(err.Error())
            return status, err
        }
        log.Debug("watchDeltaChannelsTask WaitToFinish done", zap.Any("collectionID", in.CollectionID))
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
        }, nil
    }

    return waitFunc()
}

// LoadSegments loads historical data into the query node; historical data can be vector data or index
func (node *QueryNode) LoadSegments(ctx context.Context, in *queryPb.LoadSegmentsRequest) (*commonpb.Status, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    dct := &loadSegmentsTask{
        baseTask: baseTask{
            ctx:  ctx,
            done: make(chan error),
        },
        req:  in,
        node: node,
    }

    err := node.scheduler.queue.Enqueue(dct)
    if err != nil {
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        log.Warn(err.Error())
        return status, err
    }
    segmentIDs := make([]UniqueID, 0)
    for _, info := range in.Infos {
        segmentIDs = append(segmentIDs, info.SegmentID)
    }
    log.Debug("loadSegmentsTask Enqueue done", zap.Int64s("segmentIDs", segmentIDs))

    waitFunc := func() (*commonpb.Status, error) {
        err = dct.WaitToFinish()
        if err != nil {
            status := &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    err.Error(),
            }
            log.Warn(err.Error())
            return status, err
        }
        log.Debug("loadSegmentsTask WaitToFinish done", zap.Int64s("segmentIDs", segmentIDs))
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
        }, nil
    }

    return waitFunc()
}

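// Caller-side sketch (illustrative; only the fields read above are assumed):
// the coordinator lists the segments to load in req.Infos, and this method
// reads info.SegmentID from each entry purely for logging before handing the
// actual loading to loadSegmentsTask.
//
//    req := &queryPb.LoadSegmentsRequest{ /* Infos: one entry per segment to load */ }
//    status, err := node.LoadSegments(ctx, req)
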
// ReleaseCollection clears all data related to this collection on the querynode
func (node *QueryNode) ReleaseCollection(ctx context.Context, in *queryPb.ReleaseCollectionRequest) (*commonpb.Status, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    dct := &releaseCollectionTask{
        baseTask: baseTask{
            ctx:  ctx,
            done: make(chan error),
        },
        req:  in,
        node: node,
    }

    err := node.scheduler.queue.Enqueue(dct)
    if err != nil {
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        log.Warn(err.Error())
        return status, err
    }
    log.Debug("releaseCollectionTask Enqueue done", zap.Any("collectionID", in.CollectionID))

    func() {
        err = dct.WaitToFinish()
        if err != nil {
            log.Warn(err.Error())
            return
        }
        log.Debug("releaseCollectionTask WaitToFinish done", zap.Any("collectionID", in.CollectionID))
    }()

    status := &commonpb.Status{
        ErrorCode: commonpb.ErrorCode_Success,
    }
    return status, nil
}

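// Unlike the task-based RPCs above, ReleaseCollection waits on the task inside
// an anonymous func but still returns Success even if WaitToFinish fails; the
// failure is only logged as a warning. ReleasePartitions below uses the same
// fire-and-log pattern.
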
// ReleasePartitions clears all data related to the specified partitions on the querynode
func (node *QueryNode) ReleasePartitions(ctx context.Context, in *queryPb.ReleasePartitionsRequest) (*commonpb.Status, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    dct := &releasePartitionsTask{
        baseTask: baseTask{
            ctx:  ctx,
            done: make(chan error),
        },
        req:  in,
        node: node,
    }

    err := node.scheduler.queue.Enqueue(dct)
    if err != nil {
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        log.Warn(err.Error())
        return status, err
    }
    log.Debug("releasePartitionsTask Enqueue done", zap.Any("collectionID", in.CollectionID))

    func() {
        err = dct.WaitToFinish()
        if err != nil {
            log.Warn(err.Error())
            return
        }
        log.Debug("releasePartitionsTask WaitToFinish done", zap.Any("collectionID", in.CollectionID))
    }()

    status := &commonpb.Status{
        ErrorCode: commonpb.ErrorCode_Success,
    }
    return status, nil
}

// ReleaseSegments removes the specified segments from the query node according to segmentIDs, partitionIDs, and collectionID
func (node *QueryNode) ReleaseSegments(ctx context.Context, in *queryPb.ReleaseSegmentsRequest) (*commonpb.Status, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        status := &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
        return status, err
    }
    status := &commonpb.Status{
        ErrorCode: commonpb.ErrorCode_Success,
    }
    for _, id := range in.SegmentIDs {
        err := node.historical.replica.removeSegment(id)
        if err != nil {
            // do not return; try to release all segments
            status.ErrorCode = commonpb.ErrorCode_UnexpectedError
            status.Reason = err.Error()
        }
        err = node.streaming.replica.removeSegment(id)
        if err != nil {
            // do not return; try to release all segments
            status.ErrorCode = commonpb.ErrorCode_UnexpectedError
            status.Reason = err.Error()
        }
    }
    return status, nil
}

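// Release is best-effort here: an error from removeSegment does not stop the
// loop, so every requested segment gets a removal attempt in both the
// historical and streaming replicas, and the returned status carries the last
// error encountered (if any).
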
// GetSegmentInfo returns segment information of the collection on the queryNode; the information includes memSize, numRow, indexName, indexID, etc.
func (node *QueryNode) GetSegmentInfo(ctx context.Context, in *queryPb.GetSegmentInfoRequest) (*queryPb.GetSegmentInfoResponse, error) {
    code := node.stateCode.Load().(internalpb.StateCode)
    if code != internalpb.StateCode_Healthy {
        err := fmt.Errorf("query node %d is not ready", Params.QueryNodeID)
        res := &queryPb.GetSegmentInfoResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    err.Error(),
            },
        }
        return res, err
    }
    infos := make([]*queryPb.SegmentInfo, 0)

    // get info from historical
    // node.historical.replica.printReplica()
    historicalSegmentInfos, err := node.historical.replica.getSegmentInfosByColID(in.CollectionID)
    if err != nil {
        log.Debug("GetSegmentInfo: get historical segmentInfo failed", zap.Int64("collectionID", in.CollectionID), zap.Error(err))
        res := &queryPb.GetSegmentInfoResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    err.Error(),
            },
        }
        return res, err
    }
    infos = append(infos, historicalSegmentInfos...)

    // get info from streaming
    // node.streaming.replica.printReplica()
    streamingSegmentInfos, err := node.streaming.replica.getSegmentInfosByColID(in.CollectionID)
    if err != nil {
        log.Debug("GetSegmentInfo: get streaming segmentInfo failed", zap.Int64("collectionID", in.CollectionID), zap.Error(err))
        res := &queryPb.GetSegmentInfoResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    err.Error(),
            },
        }
        return res, err
    }
    infos = append(infos, streamingSegmentInfos...)
    // log.Debug("GetSegmentInfo: get segment info from query node", zap.Int64("nodeID", node.session.ServerID), zap.Any("segment infos", infos))

    return &queryPb.GetSegmentInfoResponse{
        Status: &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_Success,
        },
        Infos: infos,
    }, nil
}

func (node *QueryNode) isHealthy() bool {
    code := node.stateCode.Load().(internalpb.StateCode)
    return code == internalpb.StateCode_Healthy
}

// GetMetrics returns system info of the query node, such as total memory, memory usage, cpu usage ...
// TODO(dragondriver): cache the Metrics and set a retention to the cache
func (node *QueryNode) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
    log.Debug("QueryNode.GetMetrics",
        zap.Int64("node_id", Params.QueryNodeID),
        zap.String("req", req.Request))

    if !node.isHealthy() {
        log.Warn("QueryNode.GetMetrics failed",
            zap.Int64("node_id", Params.QueryNodeID),
            zap.String("req", req.Request),
            zap.Error(errQueryNodeIsUnhealthy(Params.QueryNodeID)))

        return &milvuspb.GetMetricsResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    msgQueryNodeIsUnhealthy(Params.QueryNodeID),
            },
            Response: "",
        }, nil
    }

    metricType, err := metricsinfo.ParseMetricType(req.Request)
    if err != nil {
        log.Warn("QueryNode.GetMetrics failed to parse metric type",
            zap.Int64("node_id", Params.QueryNodeID),
            zap.String("req", req.Request),
            zap.Error(err))

        return &milvuspb.GetMetricsResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UnexpectedError,
                Reason:    err.Error(),
            },
            Response: "",
        }, nil
    }

    log.Debug("QueryNode.GetMetrics",
        zap.String("metric_type", metricType))

    if metricType == metricsinfo.SystemInfoMetrics {
        metrics, err := getSystemInfoMetrics(ctx, req, node)

        log.Debug("QueryNode.GetMetrics",
            zap.Int64("node_id", Params.QueryNodeID),
            zap.String("req", req.Request),
            zap.String("metric_type", metricType),
            zap.Any("metrics", metrics), // TODO(dragondriver): necessary? may be very large
            zap.Error(err))

        return metrics, err
    }

    log.Debug("QueryNode.GetMetrics failed, request metric type is not implemented yet",
        zap.Int64("node_id", Params.QueryNodeID),
        zap.String("req", req.Request),
        zap.String("metric_type", metricType))

    return &milvuspb.GetMetricsResponse{
        Status: &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    metricsinfo.MsgUnimplementedMetric,
        },
        Response: "",
    }, nil
}

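// Caller-side sketch (illustrative; assumes the request body is the usual
// metricsinfo JSON carrying the metric type, which ParseMetricType expects):
//
//    req := &milvuspb.GetMetricsRequest{Request: `{"metric_type": "system_info"}`}
//    resp, err := node.GetMetrics(ctx, req)
//
// Only system-info metrics are implemented above; any other metric type yields
// an UnexpectedError status with MsgUnimplementedMetric as the reason.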