// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

package querynode

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"strconv"
	"time"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus/internal/log"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/internalpb"
	queryPb "github.com/milvus-io/milvus/internal/proto/querypb"
	"github.com/milvus-io/milvus/internal/rootcoord"
	"github.com/milvus-io/milvus/internal/util/mqclient"
)

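// task is the interface implemented by every queryNode task
// (addQueryChannelTask, watchDmChannelsTask, watchDeltaChannelsTask,
// loadSegmentsTask, releaseCollectionTask and releasePartitionsTask).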
type task interface {
	ID() UniqueID       // return ReqID
	SetID(uid UniqueID) // set ReqID
	Timestamp() Timestamp
	PreExecute(ctx context.Context) error
	Execute(ctx context.Context) error
	PostExecute(ctx context.Context) error
	WaitToFinish() error
	Notify(err error)
	OnEnqueue() error
}

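// baseTask holds the fields shared by all tasks: a channel that delivers the
// final error, the task context and the request ID.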
type baseTask struct {
	done chan error
	ctx  context.Context
	id   UniqueID
}

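// addQueryChannelTask handles an AddQueryChannelRequest: it registers the
// queryCollection and wires up its query and query-result message streams.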
type addQueryChannelTask struct {
	baseTask
	req  *queryPb.AddQueryChannelRequest
	node *QueryNode
}

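// watchDmChannelsTask handles a WatchDmChannelsRequest: it subscribes the DML
// vChannels of a collection or partition and loads the related growing segments.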
type watchDmChannelsTask struct {
	baseTask
	req  *queryPb.WatchDmChannelsRequest
	node *QueryNode
}

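// watchDeltaChannelsTask handles a WatchDeltaChannelsRequest: it subscribes the
// delta vChannels of a collection so that delete records can be consumed.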
type watchDeltaChannelsTask struct {
	baseTask
	req  *queryPb.WatchDeltaChannelsRequest
	node *QueryNode
}

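// loadSegmentsTask handles a LoadSegmentsRequest: it loads the sealed segments
// described in the request through the segment loader.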
type loadSegmentsTask struct {
	baseTask
	req  *queryPb.LoadSegmentsRequest
	node *QueryNode
}

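// releaseCollectionTask handles a ReleaseCollectionRequest: it releases all
// resources of a collection from this query node.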
type releaseCollectionTask struct {
	baseTask
	req  *queryPb.ReleaseCollectionRequest
	node *QueryNode
}

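// releasePartitionsTask handles a ReleasePartitionsRequest: it releases the
// given partitions of a collection from this query node.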
type releasePartitionsTask struct {
	baseTask
	req  *queryPb.ReleasePartitionsRequest
	node *QueryNode
}

func (b *baseTask) ID() UniqueID {
	return b.id
}

func (b *baseTask) SetID(uid UniqueID) {
	b.id = uid
}

func (b *baseTask) WaitToFinish() error {
	err := <-b.done
	return err
}

func (b *baseTask) Notify(err error) {
	b.done <- err
}

// addQueryChannel
func (r *addQueryChannelTask) Timestamp() Timestamp {
	if r.req.Base == nil {
		log.Warn("nil base req in addQueryChannelTask", zap.Any("collectionID", r.req.CollectionID))
		return 0
	}
	return r.req.Base.Timestamp
}

func (r *addQueryChannelTask) OnEnqueue() error {
	if r.req == nil || r.req.Base == nil {
		r.SetID(rand.Int63n(100000000000))
	} else {
		r.SetID(r.req.Base.MsgID)
	}
	return nil
}

func (r *addQueryChannelTask) PreExecute(ctx context.Context) error {
	return nil
}

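// Execute adds the query channel for the collection: it registers the
// queryCollection, subscribes the request channel (optionally seeking back to
// the given position), attaches the result channel as a producer, records the
// global sealed segments, and finally starts the queryCollection.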
func (r *addQueryChannelTask) Execute(ctx context.Context) error {
	log.Debug("Execute addQueryChannelTask",
		zap.Any("collectionID", r.req.CollectionID))

	collectionID := r.req.CollectionID
	if r.node.queryService == nil {
		errMsg := "null query service, collectionID = " + fmt.Sprintln(collectionID)
		return errors.New(errMsg)
	}

	if r.node.queryService.hasQueryCollection(collectionID) {
		log.Debug("queryCollection already exists when addQueryChannel",
			zap.Any("collectionID", collectionID),
		)
		return nil
	}

	// add search collection
	err := r.node.queryService.addQueryCollection(collectionID)
	if err != nil {
		return err
	}
	log.Debug("add query collection", zap.Any("collectionID", collectionID))

	// add request channel
	sc, err := r.node.queryService.getQueryCollection(collectionID)
	if err != nil {
		return err
	}
	consumeChannels := []string{r.req.RequestChannelID}
	consumeSubName := Params.MsgChannelSubName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())

	if Params.skipQueryChannelRecovery {
		log.Debug("Skip query channel seek back ", zap.Strings("channels", consumeChannels),
			zap.String("seek position", string(r.req.SeekPosition.MsgID)),
			zap.Uint64("ts", r.req.SeekPosition.Timestamp))
		sc.queryMsgStream.AsConsumerWithPosition(consumeChannels, consumeSubName, mqclient.SubscriptionPositionLatest)
	} else {
		sc.queryMsgStream.AsConsumer(consumeChannels, consumeSubName)
		if r.req.SeekPosition == nil || len(r.req.SeekPosition.MsgID) == 0 {
			// as consumer
			log.Debug("QueryNode AsConsumer", zap.Strings("channels", consumeChannels), zap.String("sub name", consumeSubName))
		} else {
			// seek query channel
			err = sc.queryMsgStream.Seek([]*internalpb.MsgPosition{r.req.SeekPosition})
			if err != nil {
				return err
			}
			log.Debug("querynode seek query channel: ", zap.Any("consumeChannels", consumeChannels),
				zap.String("seek position", string(r.req.SeekPosition.MsgID)))
		}
	}

	// add result channel
	producerChannels := []string{r.req.ResultChannelID}
	sc.queryResultMsgStream.AsProducer(producerChannels)
	log.Debug("QueryNode AsProducer", zap.Strings("channels", producerChannels))

	// init global sealed segments
	for _, segment := range r.req.GlobalSealedSegments {
		sc.globalSegmentManager.addGlobalSegmentInfo(segment)
	}

	// start queryCollection; the message stream must be subscribed as a consumer before start
	sc.start()
	log.Debug("start query collection", zap.Any("collectionID", collectionID))

	log.Debug("addQueryChannelTask done",
		zap.Any("collectionID", r.req.CollectionID),
	)
	return nil
}

func (r *addQueryChannelTask) PostExecute(ctx context.Context) error {
	return nil
}

// watchDmChannelsTask
func (w *watchDmChannelsTask) Timestamp() Timestamp {
	if w.req.Base == nil {
		log.Warn("nil base req in watchDmChannelsTask", zap.Any("collectionID", w.req.CollectionID))
		return 0
	}
	return w.req.Base.Timestamp
}

func (w *watchDmChannelsTask) OnEnqueue() error {
	if w.req == nil || w.req.Base == nil {
		w.SetID(rand.Int63n(100000000000))
	} else {
		w.SetID(w.req.Base.MsgID)
	}
	return nil
}

func (w *watchDmChannelsTask) PreExecute(ctx context.Context) error {
	return nil
}

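// Execute watches the DML vChannels of the target collection (or partition):
// it initializes the streaming and historical replicas, groups the channels
// into ones to subscribe and ones to seek, records excluded segments, creates
// tSafes and flow graphs, loads the growing segments and finally starts the
// flow graphs.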
func (w *watchDmChannelsTask) Execute(ctx context.Context) error {
	collectionID := w.req.CollectionID
	partitionID := w.req.PartitionID
	// if no partitionID is specified, the load type is load collection
	loadPartition := partitionID != 0

	// get all vChannels
	vChannels := make([]Channel, 0)
	pChannels := make([]Channel, 0)
	VPChannels := make(map[string]string) // map[vChannel]pChannel
	for _, info := range w.req.Infos {
		v := info.ChannelName
		p := rootcoord.ToPhysicalChannel(info.ChannelName)
		vChannels = append(vChannels, v)
		pChannels = append(pChannels, p)
		VPChannels[v] = p
	}
	log.Debug("Starting WatchDmChannels ...",
		zap.Any("collectionName", w.req.Schema.Name),
		zap.Any("collectionID", collectionID),
		zap.Any("vChannels", vChannels),
		zap.Any("pChannels", pChannels),
	)
	if len(VPChannels) != len(vChannels) {
		return errors.New("get physical channels failed, illegal channel length, collectionID = " + fmt.Sprintln(collectionID))
	}
	log.Debug("Get physical channels done",
		zap.Any("collectionID", collectionID),
	)

	// init streaming replica
	if hasCollectionInStreaming := w.node.streaming.replica.hasCollection(collectionID); !hasCollectionInStreaming {
		err := w.node.streaming.replica.addCollection(collectionID, w.req.Schema)
		if err != nil {
			return err
		}
	}
	// init historical replica
	if hasCollectionInHistorical := w.node.historical.replica.hasCollection(collectionID); !hasCollectionInHistorical {
		err := w.node.historical.replica.addCollection(collectionID, w.req.Schema)
		if err != nil {
			return err
		}
	}
	var l loadType
	if loadPartition {
		l = loadTypePartition
	} else {
		l = loadTypeCollection
	}
	sCol, err := w.node.streaming.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}
	sCol.addVChannels(vChannels)
	sCol.addPChannels(pChannels)
	sCol.setLoadType(l)
	hCol, err := w.node.historical.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}
	hCol.addVChannels(vChannels)
	hCol.addPChannels(pChannels)
	hCol.setLoadType(l)
	if loadPartition {
		sCol.deleteReleasedPartition(partitionID)
		hCol.deleteReleasedPartition(partitionID)
		if hasPartitionInStreaming := w.node.streaming.replica.hasPartition(partitionID); !hasPartitionInStreaming {
			err := w.node.streaming.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
		if hasPartitionInHistorical := w.node.historical.replica.hasPartition(partitionID); !hasPartitionInHistorical {
			err := w.node.historical.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
	}
	log.Debug("watchDMChannel, init replica done", zap.Any("collectionID", collectionID))

	// get subscription name
	getUniqueSubName := func() string {
		prefixName := Params.MsgChannelSubName
		return prefixName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
	}
	consumeSubName := getUniqueSubName()

	// group channels into ones to seek and ones to subscribe
	toSeekChannels := make([]*internalpb.MsgPosition, 0)
	toSubChannels := make([]Channel, 0)
	for _, info := range w.req.Infos {
		if info.SeekPosition == nil || len(info.SeekPosition.MsgID) == 0 {
			toSubChannels = append(toSubChannels, info.ChannelName)
			continue
		}
		info.SeekPosition.MsgGroup = consumeSubName
		toSeekChannels = append(toSeekChannels, info.SeekPosition)
	}
	log.Debug("watchDMChannel, group channels done", zap.Any("collectionID", collectionID))

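	// Excluded segments are recorded below so that their data can be filtered
	// out of the consumed DML stream: unflushed segments (data before their
	// checkpoint), flushed segments whose checkpoint is later than the seek
	// position, and dropped segments whose checkpoint is later than the seek
	// position.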
	// add excluded segments for unFlushed segments,
	// unFlushed segments before the checkpoint should be filtered out.
	unFlushedCheckPointInfos := make([]*datapb.SegmentInfo, 0)
	for _, info := range w.req.Infos {
		unFlushedCheckPointInfos = append(unFlushedCheckPointInfos, info.UnflushedSegments...)
	}
	w.node.streaming.replica.addExcludedSegments(collectionID, unFlushedCheckPointInfos)
	log.Debug("watchDMChannel, add check points info for unFlushed segments done",
		zap.Any("collectionID", collectionID),
		zap.Any("unFlushedCheckPointInfos", unFlushedCheckPointInfos),
	)

	// add excluded segments for flushed segments,
	// flushed segments with a later checkpoint than the seek position should be filtered out.
	flushedCheckPointInfos := make([]*datapb.SegmentInfo, 0)
	for _, info := range w.req.Infos {
		for _, flushedSegment := range info.FlushedSegments {
			for _, position := range toSeekChannels {
				if flushedSegment.DmlPosition != nil &&
					flushedSegment.DmlPosition.ChannelName == position.ChannelName &&
					flushedSegment.DmlPosition.Timestamp > position.Timestamp {
					flushedCheckPointInfos = append(flushedCheckPointInfos, flushedSegment)
				}
			}
		}
	}
	w.node.streaming.replica.addExcludedSegments(collectionID, flushedCheckPointInfos)
	log.Debug("watchDMChannel, add check points info for flushed segments done",
		zap.Any("collectionID", collectionID),
		zap.Any("flushedCheckPointInfos", flushedCheckPointInfos),
	)

	// add excluded segments for dropped segments,
	// dropped segments with a later checkpoint than the seek position should be filtered out.
	droppedCheckPointInfos := make([]*datapb.SegmentInfo, 0)
	for _, info := range w.req.Infos {
		for _, droppedSegment := range info.DroppedSegments {
			for _, position := range toSeekChannels {
				if droppedSegment != nil && droppedSegment.DmlPosition != nil &&
					droppedSegment.DmlPosition.ChannelName == position.ChannelName &&
					droppedSegment.DmlPosition.Timestamp > position.Timestamp {
					droppedCheckPointInfos = append(droppedCheckPointInfos, droppedSegment)
				}
			}
		}
	}
	w.node.streaming.replica.addExcludedSegments(collectionID, droppedCheckPointInfos)
	log.Debug("watchDMChannel, add check points info for dropped segments done",
		zap.Any("collectionID", collectionID),
		zap.Any("droppedCheckPointInfos", droppedCheckPointInfos),
	)

	// create tSafe
	for _, channel := range vChannels {
		w.node.tSafeReplica.addTSafe(channel)
	}

	// add flow graph
	if loadPartition {
		w.node.dataSyncService.addPartitionFlowGraph(collectionID, partitionID, vChannels)
		log.Debug("Query node add partition flow graphs", zap.Any("channels", vChannels))
	} else {
		w.node.dataSyncService.addCollectionFlowGraph(collectionID, vChannels)
		log.Debug("Query node add collection flow graphs", zap.Any("channels", vChannels))
	}

	// add tSafe watcher if queryCollection exists
	qc, err := w.node.queryService.getQueryCollection(collectionID)
	if err == nil {
		for _, channel := range vChannels {
			err = qc.addTSafeWatcher(channel)
			if err != nil {
				// the tSafe already exists, which is not an error
				log.Warn(err.Error())
			}
		}
	}

	// channels as consumer
	var nodeFGs map[Channel]*queryNodeFlowGraph
	if loadPartition {
		nodeFGs, err = w.node.dataSyncService.getPartitionFlowGraphs(partitionID, vChannels)
		if err != nil {
			return err
		}
	} else {
		nodeFGs, err = w.node.dataSyncService.getCollectionFlowGraphs(collectionID, vChannels)
		if err != nil {
			return err
		}
	}
	for _, channel := range toSubChannels {
		for _, fg := range nodeFGs {
			if fg.channel == channel {
				// use pChannel to consume
				err := fg.consumerFlowGraph(VPChannels[channel], consumeSubName)
				if err != nil {
					errMsg := "msgStream consume error :" + err.Error()
					log.Warn(errMsg)
					return errors.New(errMsg)
				}
			}
		}
	}
	log.Debug("as consumer channels",
		zap.Any("collectionID", collectionID),
		zap.Any("toSubChannels", toSubChannels))

	// seek channel
	for _, pos := range toSeekChannels {
		for _, fg := range nodeFGs {
			if fg.channel == pos.ChannelName {
				pos.MsgGroup = consumeSubName
				// use pChannel to seek
				pos.ChannelName = VPChannels[fg.channel]
				err := fg.seekQueryNodeFlowGraph(pos)
				if err != nil {
					errMsg := "msgStream seek error :" + err.Error()
					log.Warn(errMsg)
					return errors.New(errMsg)
				}
			}
		}
	}
	log.Debug("Seek all channel done",
		zap.Any("collectionID", collectionID),
		zap.Any("toSeekChannels", toSeekChannels))

	// load growing segments
	unFlushedSegments := make([]*queryPb.SegmentLoadInfo, 0)
	unFlushedSegmentIDs := make([]UniqueID, 0)
	for _, info := range w.req.Infos {
		for _, ufInfo := range info.UnflushedSegments {
			// an unflushed segment may not have binlogs yet; skip loading it
			if len(ufInfo.Binlogs) > 0 {
				unFlushedSegments = append(unFlushedSegments, &queryPb.SegmentLoadInfo{
					SegmentID:    ufInfo.ID,
					PartitionID:  ufInfo.PartitionID,
					CollectionID: ufInfo.CollectionID,
					BinlogPaths:  ufInfo.Binlogs,
					NumOfRows:    ufInfo.NumOfRows,
					Statslogs:    ufInfo.Statslogs,
					Deltalogs:    ufInfo.Deltalogs,
				})
				unFlushedSegmentIDs = append(unFlushedSegmentIDs, ufInfo.ID)
			}
		}
	}
	req := &queryPb.LoadSegmentsRequest{
		Infos:        unFlushedSegments,
		CollectionID: collectionID,
		Schema:       w.req.Schema,
	}
	log.Debug("loading growing segments in WatchDmChannels...",
		zap.Any("collectionID", collectionID),
		zap.Any("unFlushedSegmentIDs", unFlushedSegmentIDs),
	)
	err = w.node.loader.loadSegment(req, segmentTypeGrowing)
	if err != nil {
		return err
	}
	log.Debug("load growing segments done in WatchDmChannels",
		zap.Any("collectionID", collectionID),
		zap.Any("unFlushedSegmentIDs", unFlushedSegmentIDs),
	)

	// start flow graphs
	if loadPartition {
		err = w.node.dataSyncService.startPartitionFlowGraph(partitionID, vChannels)
		if err != nil {
			return err
		}
	} else {
		err = w.node.dataSyncService.startCollectionFlowGraph(collectionID, vChannels)
		if err != nil {
			return err
		}
	}

	log.Debug("WatchDmChannels done", zap.String("ChannelIDs", fmt.Sprintln(vChannels)))
	return nil
}

func (w *watchDmChannelsTask) PostExecute(ctx context.Context) error {
	return nil
}

// watchDeltaChannelsTask
func (w *watchDeltaChannelsTask) Timestamp() Timestamp {
	if w.req.Base == nil {
		log.Warn("nil base req in watchDeltaChannelsTask", zap.Any("collectionID", w.req.CollectionID))
		return 0
	}
	return w.req.Base.Timestamp
}

func (w *watchDeltaChannelsTask) OnEnqueue() error {
	if w.req == nil || w.req.Base == nil {
		w.SetID(rand.Int63n(100000000000))
	} else {
		w.SetID(w.req.Base.MsgID)
	}
	return nil
}

func (w *watchDeltaChannelsTask) PreExecute(ctx context.Context) error {
	return nil
}

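// Execute watches the delta vChannels of the target collection: it registers
// the channels on the historical and streaming replicas, creates their tSafes
// and the collection delta flow graph, subscribes the channels from the latest
// position, loads delete records from the DML checkpoints and starts the flow
// graph.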
func (w *watchDeltaChannelsTask) Execute(ctx context.Context) error {
	collectionID := w.req.CollectionID

	// get all vChannels
	vDeltaChannels := make([]Channel, 0)
	pDeltaChannels := make([]Channel, 0)
	VPDeltaChannels := make(map[string]string) // map[vChannel]pChannel
	for _, info := range w.req.Infos {
		v := info.ChannelName
		p := rootcoord.ToPhysicalChannel(info.ChannelName)
		vDeltaChannels = append(vDeltaChannels, v)
		pDeltaChannels = append(pDeltaChannels, p)
		VPDeltaChannels[v] = p
	}
	log.Debug("Starting WatchDeltaChannels ...",
		zap.Any("collectionID", collectionID),
		zap.Any("vDeltaChannels", vDeltaChannels),
		zap.Any("pChannels", pDeltaChannels),
	)
	if len(VPDeltaChannels) != len(vDeltaChannels) {
		return errors.New("get physical channels failed, illegal channel length, collectionID = " + fmt.Sprintln(collectionID))
	}
	log.Debug("Get physical channels done",
		zap.Any("collectionID", collectionID),
	)

	if hasCollectionInHistorical := w.node.historical.replica.hasCollection(collectionID); !hasCollectionInHistorical {
		return fmt.Errorf("cannot find collection with collectionID, %d", collectionID)
	}
	hCol, err := w.node.historical.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}

	// check whether one of the delta channels has already been watched
	for _, dstChan := range vDeltaChannels {
		for _, srcChan := range hCol.vDeltaChannels {
			if dstChan == srcChan {
				return nil
			}
		}
	}

	hCol.addVDeltaChannels(vDeltaChannels)
	hCol.addPDeltaChannels(pDeltaChannels)

	if hasCollectionInStreaming := w.node.streaming.replica.hasCollection(collectionID); !hasCollectionInStreaming {
		return fmt.Errorf("cannot find collection with collectionID, %d", collectionID)
	}
	sCol, err := w.node.streaming.replica.getCollectionByID(collectionID)
	if err != nil {
		return err
	}
	sCol.addVDeltaChannels(vDeltaChannels)
	sCol.addPDeltaChannels(pDeltaChannels)

	// get subscription name
	getUniqueSubName := func() string {
		prefixName := Params.MsgChannelSubName
		return prefixName + "-" + strconv.FormatInt(collectionID, 10) + "-" + strconv.Itoa(rand.Int())
	}
	consumeSubName := getUniqueSubName()

	// collect the channels to subscribe
	toSubChannels := make([]Channel, 0)
	for _, info := range w.req.Infos {
		toSubChannels = append(toSubChannels, info.ChannelName)
	}
	log.Debug("watchDeltaChannel, group channels done", zap.Any("collectionID", collectionID))

	// create tSafe
	for _, channel := range vDeltaChannels {
		w.node.tSafeReplica.addTSafe(channel)
	}

	w.node.dataSyncService.addCollectionDeltaFlowGraph(collectionID, vDeltaChannels)

	// add tSafe watcher if queryCollection exists
	qc, err := w.node.queryService.getQueryCollection(collectionID)
	if err == nil {
		for _, channel := range vDeltaChannels {
			err = qc.addTSafeWatcher(channel)
			if err != nil {
				// the tSafe already exists, which is not an error
				log.Warn(err.Error())
			}
		}
	}

	// channels as consumer
	var nodeFGs map[Channel]*queryNodeFlowGraph
	nodeFGs, err = w.node.dataSyncService.getCollectionDeltaFlowGraphs(collectionID, vDeltaChannels)
	if err != nil {
		return err
	}
	for _, channel := range toSubChannels {
		for _, fg := range nodeFGs {
			if fg.channel == channel {
				// use pChannel to consume
				err := fg.consumerFlowGraphLatest(VPDeltaChannels[channel], consumeSubName)
				if err != nil {
					errMsg := "msgStream consume error :" + err.Error()
					log.Warn(errMsg)
					return errors.New(errMsg)
				}
			}
		}
	}
	log.Debug("as consumer channels",
		zap.Any("collectionID", collectionID),
		zap.Any("toSubChannels", toSubChannels))

	for _, info := range w.req.Infos {
		w.node.loader.FromDmlCPLoadDelete(w.ctx, collectionID, info.SeekPosition)
	}

	// start flow graphs
	err = w.node.dataSyncService.startCollectionDeltaFlowGraph(collectionID, vDeltaChannels)
	if err != nil {
		return err
	}

	log.Debug("WatchDeltaChannels done", zap.String("ChannelIDs", fmt.Sprintln(vDeltaChannels)))
	return nil
}

func (w *watchDeltaChannelsTask) PostExecute(ctx context.Context) error {
	return nil
}

// loadSegmentsTask
func (l *loadSegmentsTask) Timestamp() Timestamp {
	if l.req.Base == nil {
		log.Warn("nil base req in loadSegmentsTask")
		return 0
	}
	return l.req.Base.Timestamp
}

func (l *loadSegmentsTask) OnEnqueue() error {
	if l.req == nil || l.req.Base == nil {
		l.SetID(rand.Int63n(100000000000))
	} else {
		l.SetID(l.req.Base.MsgID)
	}
	return nil
}

func (l *loadSegmentsTask) PreExecute(ctx context.Context) error {
	return nil
}

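// Execute loads the sealed segments listed in the request: it makes sure the
// target collections and partitions exist in both the historical and streaming
// replicas, loads the segments through the segment loader, and clears the
// released-partition marks of the partitions it loaded.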
func (l *loadSegmentsTask) Execute(ctx context.Context) error {
	// TODO: support db
	log.Debug("Query node load segment", zap.String("loadSegmentRequest", fmt.Sprintln(l.req)))
	var err error

	// init meta
	for _, info := range l.req.Infos {
		collectionID := info.CollectionID
		partitionID := info.PartitionID
		hasCollectionInHistorical := l.node.historical.replica.hasCollection(collectionID)
		hasPartitionInHistorical := l.node.historical.replica.hasPartition(partitionID)
		if !hasCollectionInHistorical {
			err = l.node.historical.replica.addCollection(collectionID, l.req.Schema)
			if err != nil {
				return err
			}
		}
		if !hasPartitionInHistorical {
			err = l.node.historical.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
		hasCollectionInStreaming := l.node.streaming.replica.hasCollection(collectionID)
		hasPartitionInStreaming := l.node.streaming.replica.hasPartition(partitionID)
		if !hasCollectionInStreaming {
			err = l.node.streaming.replica.addCollection(collectionID, l.req.Schema)
			if err != nil {
				return err
			}
		}
		if !hasPartitionInStreaming {
			err = l.node.streaming.replica.addPartition(collectionID, partitionID)
			if err != nil {
				return err
			}
		}
	}

	err = l.node.loader.loadSegment(l.req, segmentTypeSealed)
	if err != nil {
		log.Warn(err.Error())
		return err
	}

	// the partitions have loaded segments again, so clear their released marks
	for _, info := range l.req.Infos {
		collectionID := info.CollectionID
		partitionID := info.PartitionID
		sCol, err := l.node.streaming.replica.getCollectionByID(collectionID)
		if err != nil {
			return err
		}
		sCol.deleteReleasedPartition(partitionID)
		hCol, err := l.node.historical.replica.getCollectionByID(collectionID)
		if err != nil {
			return err
		}
		hCol.deleteReleasedPartition(partitionID)
	}

	log.Debug("LoadSegments done", zap.String("SegmentLoadInfos", fmt.Sprintln(l.req.Infos)))
	return nil
}

func (l *loadSegmentsTask) PostExecute(ctx context.Context) error {
	return nil
}

// releaseCollectionTask
func (r *releaseCollectionTask) Timestamp() Timestamp {
	if r.req.Base == nil {
		log.Warn("nil base req in releaseCollectionTask", zap.Any("collectionID", r.req.CollectionID))
		return 0
	}
	return r.req.Base.Timestamp
}

func (r *releaseCollectionTask) OnEnqueue() error {
	if r.req == nil || r.req.Base == nil {
		r.SetID(rand.Int63n(100000000000))
	} else {
		r.SetID(r.req.Base.MsgID)
	}
	return nil
}

func (r *releaseCollectionTask) PreExecute(ctx context.Context) error {
	return nil
}

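// ReplicaType distinguishes which replica (streaming or historical) a release
// operation works on.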
type ReplicaType int

const (
	replicaNone ReplicaType = iota
	replicaStreaming
	replicaHistorical
)

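// Execute releases the whole collection: it stops the queryCollection, releases
// the collection from the streaming and historical replicas, and removes the
// collection's global sealed segment records.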
func (r *releaseCollectionTask) Execute(ctx context.Context) error {
	log.Debug("Execute release collection task", zap.Any("collectionID", r.req.CollectionID))
	errMsg := "release collection failed, collectionID = " + strconv.FormatInt(r.req.CollectionID, 10) + ", err = "
	log.Debug("release streaming", zap.Any("collectionID", r.req.CollectionID))
	// sleep to wait for the in-flight query tasks to finish
	const gracefulReleaseTime = 1
	time.Sleep(gracefulReleaseTime * time.Second)
	log.Debug("Starting release collection...",
		zap.Any("collectionID", r.req.CollectionID),
	)

	// remove query collection
	r.node.queryService.stopQueryCollection(r.req.CollectionID)

	err := r.releaseReplica(r.node.streaming.replica, replicaStreaming)
	if err != nil {
		return errors.New(errMsg + err.Error())
	}

	// remove collection metas in streaming and historical
	log.Debug("release historical", zap.Any("collectionID", r.req.CollectionID))
	err = r.releaseReplica(r.node.historical.replica, replicaHistorical)
	if err != nil {
		return errors.New(errMsg + err.Error())
	}
	r.node.historical.removeGlobalSegmentIDsByCollectionID(r.req.CollectionID)

	log.Debug("ReleaseCollection done", zap.Int64("collectionID", r.req.CollectionID))
	return nil
}

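// releaseReplica releases the collection from one replica: it sets the release
// time, removes the related flow graphs and tSafes (DML channels for the
// streaming replica, delta channels for the historical replica), clears the
// excluded-segments record and finally removes the collection itself.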
func (r *releaseCollectionTask) releaseReplica(replica ReplicaInterface, replicaType ReplicaType) error {
	collection, err := replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
	}
	// set release time
	log.Debug("set release time", zap.Any("collectionID", r.req.CollectionID))
	collection.setReleaseTime(r.req.Base.Timestamp)

	if replicaType == replicaStreaming {
		r.node.dataSyncService.removeCollectionFlowGraph(r.req.CollectionID)
		// remove the flow graphs of partitions that belong to the target collection
		partitionIDs, err := replica.getPartitionIDs(r.req.CollectionID)
		if err != nil {
			return err
		}
		for _, partitionID := range partitionIDs {
			r.node.dataSyncService.removePartitionFlowGraph(partitionID)
		}
		// remove all tSafes of the target collection
		for _, channel := range collection.getVChannels() {
			log.Debug("Releasing tSafe in releaseCollectionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vChannel", channel),
			)
			// a missing tSafe in tSafeReplica is not treated as an error
			_ = r.node.tSafeReplica.removeTSafe(channel)
			// queryCollection and Collection would be deleted in releaseCollection,
			// so we don't need to remove the tSafeWatcher or channel manually.
		}
	} else {
		r.node.dataSyncService.removeCollectionDeltaFlowGraph(r.req.CollectionID)
		// remove all tSafes of the target collection
		for _, channel := range collection.getVDeltaChannels() {
			log.Debug("Releasing tSafe in releaseCollectionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vDeltaChannel", channel),
			)
			// a missing tSafe in tSafeReplica is not treated as an error
			_ = r.node.tSafeReplica.removeTSafe(channel)
			// queryCollection and Collection would be deleted in releaseCollection,
			// so we don't need to remove the tSafeWatcher or channel manually.
		}
	}

	// remove excludedSegments record
	replica.removeExcludedSegments(r.req.CollectionID)
	err = replica.removeCollection(r.req.CollectionID)
	if err != nil {
		return err
	}
	return nil
}

func (r *releaseCollectionTask) PostExecute(ctx context.Context) error {
	return nil
}

// releasePartitionsTask
func (r *releasePartitionsTask) Timestamp() Timestamp {
	if r.req.Base == nil {
		log.Warn("nil base req in releasePartitionsTask", zap.Any("collectionID", r.req.CollectionID))
		return 0
	}
	return r.req.Base.Timestamp
}

func (r *releasePartitionsTask) OnEnqueue() error {
	if r.req == nil || r.req.Base == nil {
		r.SetID(rand.Int63n(100000000000))
	} else {
		r.SetID(r.req.Base.MsgID)
	}
	return nil
}

func (r *releasePartitionsTask) PreExecute(ctx context.Context) error {
	return nil
}

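// Execute releases the requested partitions: it removes their flow graphs,
// tSafes and tSafe watchers, drops the partitions from the streaming and
// historical replicas and marks them as released; when the collection was
// loaded by partition and no partition is left, it also tears down the delta
// channels. Finally it removes the global segment info of the released
// partitions.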
func (r *releasePartitionsTask) Execute(ctx context.Context) error {
	log.Debug("Execute release partition task",
		zap.Any("collectionID", r.req.CollectionID),
		zap.Any("partitionIDs", r.req.PartitionIDs))
	errMsg := "release partitions failed, collectionID = " + strconv.FormatInt(r.req.CollectionID, 10) + ", err = "

	// sleep to wait for the in-flight query tasks to finish
	const gracefulReleaseTime = 1
	time.Sleep(gracefulReleaseTime * time.Second)

	// get collection from streaming and historical
	hCol, err := r.node.historical.replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
	}
	sCol, err := r.node.streaming.replica.getCollectionByID(r.req.CollectionID)
	if err != nil {
		return err
	}
	log.Debug("start release partition", zap.Any("collectionID", r.req.CollectionID))

	// release partitions
	vChannels := sCol.getVChannels()
	for _, id := range r.req.PartitionIDs {
		if _, err := r.node.dataSyncService.getPartitionFlowGraphs(id, vChannels); err == nil {
			r.node.dataSyncService.removePartitionFlowGraph(id)
			// remove all tSafes of the target partition
			for _, channel := range vChannels {
				log.Debug("Releasing tSafe in releasePartitionTask...",
					zap.Any("collectionID", r.req.CollectionID),
					zap.Any("partitionID", id),
					zap.Any("vChannel", channel),
				)
				// a missing tSafe in tSafeReplica is not treated as an error
				isRemoved := r.node.tSafeReplica.removeTSafe(channel)
				if isRemoved {
					// the tSafe has been removed, so remove the corresponding
					// tSafeWatcher from the queryCollection and the channel
					// from the collection
					qc, err := r.node.queryService.getQueryCollection(r.req.CollectionID)
					if err != nil {
						return err
					}
					err = qc.removeTSafeWatcher(channel)
					if err != nil {
						return err
					}
					sCol.removeVChannel(channel)
					hCol.removeVChannel(channel)
				}
			}
		}

		// remove partition from streaming and historical
		hasPartitionInHistorical := r.node.historical.replica.hasPartition(id)
		if hasPartitionInHistorical {
			err := r.node.historical.replica.removePartition(id)
			if err != nil {
				// do not return; try to release all partitions
				log.Warn(errMsg + err.Error())
			}
		}
		hasPartitionInStreaming := r.node.streaming.replica.hasPartition(id)
		if hasPartitionInStreaming {
			err := r.node.streaming.replica.removePartition(id)
			if err != nil {
				// do not return; try to release all partitions
				log.Warn(errMsg + err.Error())
			}
		}

		hCol.addReleasedPartition(id)
		sCol.addReleasedPartition(id)
	}
	pids, err := r.node.historical.replica.getPartitionIDs(r.req.CollectionID)
	if err != nil {
		return err
	}
	log.Debug("start releasing history partition ids", zap.Any("pids", pids), zap.Any("load type", hCol.getLoadType()))
	if len(pids) == 0 && hCol.getLoadType() == loadTypePartition {
		r.node.dataSyncService.removeCollectionDeltaFlowGraph(r.req.CollectionID)
		log.Debug("release delta channels", zap.Any("deltaChannels", hCol.getVDeltaChannels()))
		vChannels := hCol.getVDeltaChannels()
		for _, channel := range vChannels {
			log.Debug("Releasing tSafe in releasePartitionTask...",
				zap.Any("collectionID", r.req.CollectionID),
				zap.Any("vChannel", channel),
			)
			// a missing tSafe in tSafeReplica is not treated as an error
			isRemoved := r.node.tSafeReplica.removeTSafe(channel)
			if isRemoved {
				// the tSafe has been removed, so remove the corresponding
				// tSafeWatcher from the queryCollection and the channel
				// from the collection
				qc, err := r.node.queryService.getQueryCollection(r.req.CollectionID)
				if err != nil {
					return err
				}
				err = qc.removeTSafeWatcher(channel)
				if err != nil {
					return err
				}
				sCol.removeVDeltaChannel(channel)
				hCol.removeVDeltaChannel(channel)
			}
		}
	}

	// release global segment info
	r.node.historical.removeGlobalSegmentIDsByPartitionIds(r.req.PartitionIDs)

	log.Debug("Release partition task done",
		zap.Any("collectionID", r.req.CollectionID),
		zap.Any("partitionIDs", r.req.PartitionIDs))
	return nil
}

func (r *releasePartitionsTask) PostExecute(ctx context.Context) error {
	return nil
}