// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package delegator

import (
	"context"
	"fmt"
	"math/rand"
	"runtime"
	"sort"
	"time"

	"github.com/cockroachdb/errors"
	"github.com/samber/lo"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/querypb"
	"github.com/milvus-io/milvus/internal/proto/segcorepb"
	"github.com/milvus-io/milvus/internal/querynodev2/cluster"
	"github.com/milvus-io/milvus/internal/querynodev2/delegator/deletebuffer"
	"github.com/milvus-io/milvus/internal/querynodev2/pkoracle"
	"github.com/milvus-io/milvus/internal/querynodev2/segments"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/pkg/common"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/metrics"
	"github.com/milvus-io/milvus/pkg/mq/msgstream"
	"github.com/milvus-io/milvus/pkg/mq/msgstream/mqwrapper"
	"github.com/milvus-io/milvus/pkg/util/commonpbutil"
	"github.com/milvus-io/milvus/pkg/util/conc"
	"github.com/milvus-io/milvus/pkg/util/funcutil"
	"github.com/milvus-io/milvus/pkg/util/merr"
	"github.com/milvus-io/milvus/pkg/util/paramtable"
	"github.com/milvus-io/milvus/pkg/util/retry"
	"github.com/milvus-io/milvus/pkg/util/timerecord"
	"github.com/milvus-io/milvus/pkg/util/tsoutil"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
)

// delegator data related part

// InsertData is the insert payload dispatched to a single growing segment.
type InsertData struct {
	RowIDs        []int64
	PrimaryKeys   []storage.PrimaryKey
	Timestamps    []uint64
	InsertRecord  *segcorepb.InsertRecord
	StartPosition *msgpb.MsgPosition
	PartitionID   int64
}

// DeleteData is the delete payload dispatched to a single segment.
type DeleteData struct {
	PartitionID int64
	PrimaryKeys []storage.PrimaryKey
	Timestamps  []uint64
	RowCount    int64
}

// Append appends another delete data into this one.
func (d *DeleteData) Append(ad DeleteData) {
	d.PrimaryKeys = append(d.PrimaryKeys, ad.PrimaryKeys...)
	d.Timestamps = append(d.Timestamps, ad.Timestamps...)
	d.RowCount += ad.RowCount
}

// ProcessInsert handles insert data in delegator.
func (sd *shardDelegator) ProcessInsert(insertRecords map[int64]*InsertData) {
	method := "ProcessInsert"
	tr := timerecord.NewTimeRecorder(method)
	log := sd.getLogger(context.Background())
	for segmentID, insertData := range insertRecords {
		growing := sd.segmentManager.GetGrowing(segmentID)
		newGrowingSegment := false
		if growing == nil {
			var err error
			// TODO: It's a weird implementation that a growing segment has load info.
			// We should separate growing segments and sealed segments by the type system.
			growing, err = segments.NewSegment(
				context.Background(),
				sd.collection,
				segments.SegmentTypeGrowing,
				0,
				&querypb.SegmentLoadInfo{
					SegmentID:     segmentID,
					PartitionID:   insertData.PartitionID,
					CollectionID:  sd.collectionID,
					InsertChannel: sd.vchannelName,
					StartPosition: insertData.StartPosition,
					DeltaPosition: insertData.StartPosition,
					Level:         datapb.SegmentLevel_L1,
				},
			)
			if err != nil {
				log.Error("failed to create new segment",
					zap.Int64("segmentID", segmentID),
					zap.Error(err))
				panic(err)
			}
			newGrowingSegment = true
		}

		err := growing.Insert(context.Background(), insertData.RowIDs, insertData.Timestamps, insertData.InsertRecord)
		if err != nil {
			log.Error("failed to insert data into growing segment",
				zap.Int64("segmentID", segmentID),
				zap.Error(err),
			)
			if errors.IsAny(err, merr.ErrSegmentNotLoaded, merr.ErrSegmentNotFound) {
				log.Warn("try to insert data into released segment, skip it", zap.Error(err))
				continue
			}
			// panic here, insert failure
			panic(err)
		}
		growing.UpdateBloomFilter(insertData.PrimaryKeys)

		if newGrowingSegment {
			sd.growingSegmentLock.Lock()
			// check whether segment has been excluded
			if ok := sd.VerifyExcludedSegments(segmentID, typeutil.MaxTimestamp); !ok {
				log.Warn("try to insert data into released segment, skip it", zap.Int64("segmentID", segmentID))
				sd.growingSegmentLock.Unlock()
				growing.Release(context.Background())
				continue
			}

			if !sd.pkOracle.Exists(growing, paramtable.GetNodeID()) {
				// register the created growing segment after insert, to avoid adding an empty growing segment to the delegator
				sd.pkOracle.Register(growing, paramtable.GetNodeID())
				sd.segmentManager.Put(context.Background(), segments.SegmentTypeGrowing, growing)
				sd.addGrowing(SegmentEntry{
					NodeID:        paramtable.GetNodeID(),
					SegmentID:     segmentID,
					PartitionID:   insertData.PartitionID,
					Version:       0,
					TargetVersion: initialTargetVersion,
				})
			}
			sd.growingSegmentLock.Unlock()
		}

		log.Debug("insert into growing segment",
			zap.Int64("collectionID", growing.Collection()),
			zap.Int64("segmentID", segmentID),
			zap.Int("rowCount", len(insertData.RowIDs)),
			zap.Uint64("maxTimestamp", insertData.Timestamps[len(insertData.Timestamps)-1]),
		)
	}
	metrics.QueryNodeProcessCost.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.InsertLabel).
		Observe(float64(tr.ElapseSpan().Milliseconds()))
}

// ProcessDelete handles delete data in delegator.
// delegator puts deleteData into buffer first,
// then dispatches data to segments according to the result of pkOracle.
func (sd *shardDelegator) ProcessDelete(deleteData []*DeleteData, ts uint64) {
	method := "ProcessDelete"
	tr := timerecord.NewTimeRecorder(method)
	// block segment loading from handling the delete buffer concurrently
	sd.deleteMut.Lock()
	defer sd.deleteMut.Unlock()

	log := sd.getLogger(context.Background())

	log.Debug("start to process delete", zap.Uint64("ts", ts))
	// add deleteData into buffer.
	cacheItems := make([]deletebuffer.BufferItem, 0, len(deleteData))
	for _, entry := range deleteData {
		cacheItems = append(cacheItems, deletebuffer.BufferItem{
			PartitionID: entry.PartitionID,
			DeleteData: storage.DeleteData{
				Pks:      entry.PrimaryKeys,
				Tss:      entry.Timestamps,
				RowCount: entry.RowCount,
			},
		})
	}

	sd.deleteBuffer.Put(&deletebuffer.Item{
		Ts:   ts,
		Data: cacheItems,
	})

	// segment => delete data
	delRecords := make(map[int64]DeleteData)
	for _, data := range deleteData {
		for i, pk := range data.PrimaryKeys {
			segmentIDs, err := sd.pkOracle.Get(pk, pkoracle.WithPartitionID(data.PartitionID))
			if err != nil {
				log.Warn("failed to get delete candidates for pk", zap.Any("pk", pk.GetValue()))
				continue
			}
			for _, segmentID := range segmentIDs {
				delRecord := delRecords[segmentID]
				delRecord.PrimaryKeys = append(delRecord.PrimaryKeys, pk)
				delRecord.Timestamps = append(delRecord.Timestamps, data.Timestamps[i])
				delRecord.RowCount++
				delRecords[segmentID] = delRecord
			}
		}
	}

	offlineSegments := typeutil.NewConcurrentSet[int64]()

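	// pin the current distribution snapshot so the segment list stays consistent
	// while the deletes are forwarded; it is unpinned after all workers return.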
	sealed, growing, version := sd.distribution.PinOnlineSegments()

	eg, ctx := errgroup.WithContext(context.Background())
	for _, entry := range sealed {
		entry := entry
		eg.Go(func() error {
			worker, err := sd.workerManager.GetWorker(ctx, entry.NodeID)
			if err != nil {
				log.Warn("failed to get worker",
					zap.Int64("nodeID", paramtable.GetNodeID()),
					zap.Error(err),
				)
				// skip if node down
				// delete will be processed after loaded again
				return nil
			}
			offlineSegments.Upsert(sd.applyDelete(ctx, entry.NodeID, worker, delRecords, entry.Segments, querypb.DataScope_Historical)...)
			return nil
		})
	}
	if len(growing) > 0 {
		eg.Go(func() error {
			worker, err := sd.workerManager.GetWorker(ctx, paramtable.GetNodeID())
			if err != nil {
				log.Error("failed to get worker(local)",
					zap.Int64("nodeID", paramtable.GetNodeID()),
					zap.Error(err),
				)
				// panic here, local worker shall not have error
				panic(err)
			}
			offlineSegments.Upsert(sd.applyDelete(ctx, paramtable.GetNodeID(), worker, delRecords, growing, querypb.DataScope_Streaming)...)
			return nil
		})
	}

	// applyDelete never returns an error, so the group error can be ignored
	_ = eg.Wait()

	sd.distribution.Unpin(version)
	offlineSegIDs := offlineSegments.Collect()
	if len(offlineSegIDs) > 0 {
		log.Warn("failed to apply delete, mark segment offline", zap.Int64s("offlineSegments", offlineSegIDs))
		sd.markSegmentOffline(offlineSegIDs...)
	}

	metrics.QueryNodeProcessCost.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.DeleteLabel).
		Observe(float64(tr.ElapseSpan().Milliseconds()))
}

// applyDelete handles delete records and applies them to the corresponding workers.
func (sd *shardDelegator) applyDelete(ctx context.Context, nodeID int64, worker cluster.Worker, delRecords map[int64]DeleteData, entries []SegmentEntry, scope querypb.DataScope) []int64 {
	offlineSegments := typeutil.NewConcurrentSet[int64]()
	log := sd.getLogger(ctx)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

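	// forward deletes concurrently through a worker pool bounded to 4x GOMAXPROCS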
	pool := conc.NewPool[struct{}](runtime.GOMAXPROCS(0) * 4)
	defer pool.Release()

	var futures []*conc.Future[struct{}]
	for _, segmentEntry := range entries {
		segmentEntry := segmentEntry
		delRecord, ok := delRecords[segmentEntry.SegmentID]
		log := log.With(
			zap.Int64("segmentID", segmentEntry.SegmentID),
			zap.Int64("workerID", nodeID),
			zap.Int("forwardRowCount", len(delRecord.PrimaryKeys)),
		)
		if ok {
			future := pool.Submit(func() (struct{}, error) {
				log.Debug("delegator plan to applyDelete via worker")
				err := retry.Handle(ctx, func() (bool, error) {
					if sd.Stopped() {
						return false, merr.WrapErrChannelNotAvailable(sd.vchannelName, "channel is unsubscribing")
					}

					err := worker.Delete(ctx, &querypb.DeleteRequest{
						Base:         commonpbutil.NewMsgBase(commonpbutil.WithTargetID(nodeID)),
						CollectionId: sd.collectionID,
						PartitionId:  segmentEntry.PartitionID,
						VchannelName: sd.vchannelName,
						SegmentId:    segmentEntry.SegmentID,
						PrimaryKeys:  storage.ParsePrimaryKeys2IDs(delRecord.PrimaryKeys),
						Timestamps:   delRecord.Timestamps,
						Scope:        scope,
					})
					if errors.Is(err, merr.ErrNodeNotFound) {
						log.Warn("try to delete data on non-exist node")
						// cancel other requests
						cancel()
						return false, err
					} else if errors.IsAny(err, merr.ErrSegmentNotFound, merr.ErrSegmentNotLoaded) {
						log.Warn("try to delete data of released segment")
						return false, nil
					} else if err != nil {
						log.Warn("worker failed to delete on segment", zap.Error(err))
						return true, err
					}
					return false, nil
				}, retry.Attempts(10))
				if err != nil {
					log.Warn("apply delete for segment failed, marking it offline")
					offlineSegments.Insert(segmentEntry.SegmentID)
				}
				return struct{}{}, err
			})
			futures = append(futures, future)
		}
	}
	conc.AwaitAll(futures...)
	return offlineSegments.Collect()
}

// markSegmentOffline marks segments offline and waits for QueryCoord to fix them.
func (sd *shardDelegator) markSegmentOffline(segmentIDs ...int64) {
	sd.distribution.AddOfflines(segmentIDs...)
}

// addGrowing adds growing segment records to the delegator.
func (sd *shardDelegator) addGrowing(entries ...SegmentEntry) {
	log := sd.getLogger(context.Background())
	log.Info("add growing segments to delegator", zap.Int64s("segmentIDs", lo.Map(entries, func(entry SegmentEntry, _ int) int64 {
		return entry.SegmentID
	})))
	sd.distribution.AddGrowing(entries...)
}

// LoadGrowing loads growing segments locally.
func (sd *shardDelegator) LoadGrowing(ctx context.Context, infos []*querypb.SegmentLoadInfo, version int64) error {
	log := sd.getLogger(ctx)

	segmentIDs := lo.Map(infos, func(info *querypb.SegmentLoadInfo, _ int) int64 { return info.GetSegmentID() })
	log.Info("loading growing segments...", zap.Int64s("segmentIDs", segmentIDs))
	loaded, err := sd.loader.Load(ctx, sd.collectionID, segments.SegmentTypeGrowing, version, infos...)
	if err != nil {
		log.Warn("failed to load growing segment", zap.Error(err))
		return err
	}

	for _, segment := range loaded {
		log := log.With(
			zap.Int64("segmentID", segment.ID()),
		)
		deletedPks, deletedTss := sd.GetLevel0Deletions(segment.Partition())
		if len(deletedPks) == 0 {
			continue
		}

		log.Info("forwarding L0 delete records...", zap.Int("deletionCount", len(deletedPks)))
		err = segment.Delete(ctx, deletedPks, deletedTss)
		if err != nil {
			log.Warn("failed to forward L0 deletions to growing segment",
				zap.Error(err),
			)

			// clear loaded growing segments
			for _, segment := range loaded {
				segment.Release(ctx)
			}
			return err
		}
	}

	segmentIDs = lo.Map(loaded, func(segment segments.Segment, _ int) int64 { return segment.ID() })
	log.Info("load growing segments done", zap.Int64s("segmentIDs", segmentIDs))

	for _, candidate := range loaded {
		sd.pkOracle.Register(candidate, paramtable.GetNodeID())
	}
	sd.addGrowing(lo.Map(loaded, func(segment segments.Segment, _ int) SegmentEntry {
		return SegmentEntry{
			NodeID:        paramtable.GetNodeID(),
			SegmentID:     segment.ID(),
			PartitionID:   segment.Partition(),
			Version:       version,
			TargetVersion: sd.distribution.getTargetVersion(),
		}
	})...)
	return nil
}

// LoadSegments loads segments locally or remotely depending on the target node.
func (sd *shardDelegator) LoadSegments(ctx context.Context, req *querypb.LoadSegmentsRequest) error {
	if len(req.GetInfos()) == 0 {
		return nil
	}

	log := sd.getLogger(ctx)

	targetNodeID := req.GetDstNodeID()
	// add common log fields
	log = log.With(
		zap.Int64("workID", req.GetDstNodeID()),
		zap.Int64s("segments", lo.Map(req.GetInfos(), func(info *querypb.SegmentLoadInfo, _ int) int64 { return info.GetSegmentID() })),
	)

	worker, err := sd.workerManager.GetWorker(ctx, targetNodeID)
	if err != nil {
		log.Warn("delegator failed to find worker", zap.Error(err))
		return err
	}

	// load bloom filter only when the candidate does not exist yet
	infos := lo.Filter(req.GetInfos(), func(info *querypb.SegmentLoadInfo, _ int) bool {
		return !sd.pkOracle.Exists(pkoracle.NewCandidateKey(info.GetSegmentID(), info.GetPartitionID(), commonpb.SegmentState_Sealed), targetNodeID)
	})
	candidates, err := sd.loader.LoadBloomFilterSet(ctx, req.GetCollectionID(), req.GetVersion(), infos...)
	if err != nil {
		log.Warn("failed to load bloom filter set for segment", zap.Error(err))
		return err
	}

	req.Base.TargetID = req.GetDstNodeID()
	log.Debug("worker loads segments...")

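	// sLoad forwards a single-segment load request to the worker, deduplicating
	// concurrent requests for the same (node, segment) pair.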
	sLoad := func(ctx context.Context, req *querypb.LoadSegmentsRequest) error {
		segmentID := req.GetInfos()[0].GetSegmentID()
		nodeID := req.GetDstNodeID()
		_, err, _ := sd.sf.Do(fmt.Sprintf("%d-%d", nodeID, segmentID), func() (struct{}, error) {
			err := worker.LoadSegments(ctx, req)
			return struct{}{}, err
		})
		return err
	}

	// separate infos into different load tasks
	if len(req.GetInfos()) > 1 {
		var reqs []*querypb.LoadSegmentsRequest
		for _, info := range req.GetInfos() {
			newReq := typeutil.Clone(req)
			newReq.Infos = []*querypb.SegmentLoadInfo{info}
			reqs = append(reqs, newReq)
		}

		group, ctx := errgroup.WithContext(ctx)
		for _, req := range reqs {
			req := req
			group.Go(func() error {
				return sLoad(ctx, req)
			})
		}
		err = group.Wait()
	} else {
		err = sLoad(ctx, req)
	}

	if err != nil {
		log.Warn("worker failed to load segments", zap.Error(err))
		return err
	}
	log.Debug("worker loads segments done")

	// loading an index segment requires no stream delete or distribution change
	if req.GetLoadScope() == querypb.LoadScope_Index {
		return nil
	}

	entries := lo.Map(req.GetInfos(), func(info *querypb.SegmentLoadInfo, _ int) SegmentEntry {
		return SegmentEntry{
			SegmentID:   info.GetSegmentID(),
			PartitionID: info.GetPartitionID(),
			NodeID:      req.GetDstNodeID(),
			Version:     req.GetVersion(),
		}
	})
	if req.GetInfos()[0].GetLevel() == datapb.SegmentLevel_L0 {
		sd.GenerateLevel0DeletionCache()
	} else {
		log.Debug("load delete...")
		err = sd.loadStreamDelete(ctx, candidates, infos, req.GetDeltaPositions(), targetNodeID, worker, entries)
		if err != nil {
			log.Warn("load stream delete failed", zap.Error(err))
			return err
		}
	}

	// alter distribution
	sd.distribution.AddDistributions(entries...)

	partStatsToReload := make([]UniqueID, 0)
	lo.ForEach(req.GetInfos(), func(info *querypb.SegmentLoadInfo, _ int) {
		partStatsToReload = append(partStatsToReload, info.PartitionID)
	})
	if paramtable.Get().QueryNodeCfg.EnableSegmentPrune.GetAsBool() {
		sd.maybeReloadPartitionStats(ctx, partStatsToReload...)
	}

	return nil
}

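// GetLevel0Deletions returns the cached L0 delete records for the given partition,
// merged with the all-partitions records and ordered by timestamp.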
func (sd *shardDelegator) GetLevel0Deletions(partitionID int64) ([]storage.PrimaryKey, []storage.Timestamp) {
	sd.level0Mut.RLock()
	deleteData, ok1 := sd.level0Deletions[partitionID]
	allPartitionsDeleteData, ok2 := sd.level0Deletions[common.AllPartitionsID]
	sd.level0Mut.RUnlock()
	// we may need to merge the specified partition deletions and the all partitions deletions,
	// so release the mutex as early as possible.

	if ok1 && ok2 {
		pks := make([]storage.PrimaryKey, 0, deleteData.RowCount+allPartitionsDeleteData.RowCount)
		tss := make([]storage.Timestamp, 0, deleteData.RowCount+allPartitionsDeleteData.RowCount)

		i := 0
		j := 0
		for i < int(deleteData.RowCount) || j < int(allPartitionsDeleteData.RowCount) {
			if i == int(deleteData.RowCount) {
				pks = append(pks, allPartitionsDeleteData.Pks[j])
				tss = append(tss, allPartitionsDeleteData.Tss[j])
				j++
			} else if j == int(allPartitionsDeleteData.RowCount) {
				pks = append(pks, deleteData.Pks[i])
				tss = append(tss, deleteData.Tss[i])
				i++
			} else if deleteData.Tss[i] < allPartitionsDeleteData.Tss[j] {
				pks = append(pks, deleteData.Pks[i])
				tss = append(tss, deleteData.Tss[i])
				i++
			} else {
				pks = append(pks, allPartitionsDeleteData.Pks[j])
				tss = append(tss, allPartitionsDeleteData.Tss[j])
				j++
			}
		}

		return pks, tss
	} else if ok1 {
		return deleteData.Pks, deleteData.Tss
	} else if ok2 {
		return allPartitionsDeleteData.Pks, allPartitionsDeleteData.Tss
	}

	return nil, nil
}

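// GenerateLevel0DeletionCache rebuilds the per-partition L0 deletion cache from the
// L0 segments of this channel, sorting each partition's records by timestamp.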
func (sd *shardDelegator) GenerateLevel0DeletionCache() {
	level0Segments := sd.segmentManager.GetBy(segments.WithLevel(datapb.SegmentLevel_L0), segments.WithChannel(sd.vchannelName))
	deletions := make(map[int64]*storage.DeleteData)
	for _, segment := range level0Segments {
		segment := segment.(*segments.L0Segment)
		pks, tss := segment.DeleteRecords()
		deleteData, ok := deletions[segment.Partition()]
		if !ok {
			deleteData = storage.NewDeleteData(pks, tss)
		} else {
			deleteData.AppendBatch(pks, tss)
		}
		deletions[segment.Partition()] = deleteData
	}

	type DeletePair struct {
		Pk storage.PrimaryKey
		Ts storage.Timestamp
	}
	for _, deleteData := range deletions {
		pairs := make([]DeletePair, deleteData.RowCount)
		for i := range deleteData.Pks {
			pairs[i] = DeletePair{deleteData.Pks[i], deleteData.Tss[i]}
		}
		sort.Slice(pairs, func(i, j int) bool {
			return pairs[i].Ts < pairs[j].Ts
		})
		for i := range pairs {
			deleteData.Pks[i], deleteData.Tss[i] = pairs[i].Pk, pairs[i].Ts
		}
	}

	sd.level0Mut.Lock()
	defer sd.level0Mut.Unlock()
	totalSize := int64(0)
	for _, delete := range deletions {
		totalSize += delete.Size()
	}
	metrics.QueryNodeLevelZeroSize.WithLabelValues(
		fmt.Sprint(paramtable.GetNodeID()),
		fmt.Sprint(sd.collectionID),
		sd.vchannelName,
	).Set(float64(totalSize))
	sd.level0Deletions = deletions
}

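// loadStreamDelete applies buffered and streamed delete records to newly loaded
// sealed segments, then registers their bloom filter candidates into the pkOracle.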
func (sd *shardDelegator) loadStreamDelete(ctx context.Context,
	candidates []*pkoracle.BloomFilterSet,
	infos []*querypb.SegmentLoadInfo,
	deltaPositions []*msgpb.MsgPosition,
	targetNodeID int64,
	worker cluster.Worker,
	entries []SegmentEntry,
) error {
	log := sd.getLogger(ctx)

	idCandidates := lo.SliceToMap(candidates, func(candidate *pkoracle.BloomFilterSet) (int64, *pkoracle.BloomFilterSet) {
		return candidate.ID(), candidate
	})

	sd.deleteMut.RLock()
	defer sd.deleteMut.RUnlock()
	// apply buffered delete for new segments
	// no goroutines here since qnv2 has no load merging logic
	for _, info := range infos {
		log := log.With(
			zap.Int64("segmentID", info.GetSegmentID()),
		)
		candidate := idCandidates[info.GetSegmentID()]
		position := info.GetDeltaPosition()
		if position == nil { // for compatibility of rolling upgrade from 2.2.x to 2.3
			// During rolling upgrade, QueryNode(2.3) may receive a merged LoadSegmentRequest
			// from QueryCoord(2.2); in version 2.2.x, only segments with the same dmlChannel
			// can be merged, and deltaPositions will be merged into a single deltaPosition,
			// so we should use `deltaPositions[0]` as the seek position for all the segments
			// within the same LoadSegmentRequest.
			position = deltaPositions[0]
		}

		deletedPks, deletedTss := sd.GetLevel0Deletions(candidate.Partition())
		deleteData := &storage.DeleteData{}
		for i, pk := range deletedPks {
			if candidate.MayPkExist(pk) {
				deleteData.Append(pk, deletedTss[i])
			}
		}

		if deleteData.RowCount > 0 {
			log.Info("forward L0 delete to worker...",
				zap.Int64("deleteRowNum", deleteData.RowCount),
			)
			err := worker.Delete(ctx, &querypb.DeleteRequest{
				Base:         commonpbutil.NewMsgBase(commonpbutil.WithTargetID(targetNodeID)),
				CollectionId: info.GetCollectionID(),
				PartitionId:  info.GetPartitionID(),
				SegmentId:    info.GetSegmentID(),
				PrimaryKeys:  storage.ParsePrimaryKeys2IDs(deleteData.Pks),
				Timestamps:   deleteData.Tss,
				Scope:        querypb.DataScope_Historical, // only sealed segments need loadStreamDelete
			})
			if err != nil {
				log.Warn("failed to apply delete when LoadSegment", zap.Error(err))
				return err
			}
		}

		deleteData = &storage.DeleteData{}
		// start position is the dml position for the segment
		// if this position is before deleteBuffer's safe ts, it means some deletes shall be read from msgstream
		if position.GetTimestamp() < sd.deleteBuffer.SafeTs() {
			log.Info("load delete from stream...")
			streamDeleteData, err := sd.readDeleteFromMsgstream(ctx, position, sd.deleteBuffer.SafeTs(), candidate)
			if err != nil {
				log.Warn("failed to read delete data from msgstream", zap.Error(err))
				return err
			}

			deleteData.Merge(streamDeleteData)
			log.Info("load delete from stream done")
		}

		// list buffered delete
		deleteRecords := sd.deleteBuffer.ListAfter(position.GetTimestamp())
		for _, entry := range deleteRecords {
			for _, record := range entry.Data {
				if record.PartitionID != common.AllPartitionsID && candidate.Partition() != record.PartitionID {
					continue
				}
				for i, pk := range record.DeleteData.Pks {
					if candidate.MayPkExist(pk) {
						deleteData.Append(pk, record.DeleteData.Tss[i])
					}
				}
			}
		}
		// if the delete count is not empty, apply it
		if deleteData.RowCount > 0 {
			log.Info("forward delete to worker...", zap.Int64("deleteRowNum", deleteData.RowCount))
			err := worker.Delete(ctx, &querypb.DeleteRequest{
				Base:         commonpbutil.NewMsgBase(commonpbutil.WithTargetID(targetNodeID)),
				CollectionId: info.GetCollectionID(),
				PartitionId:  info.GetPartitionID(),
				SegmentId:    info.GetSegmentID(),
				PrimaryKeys:  storage.ParsePrimaryKeys2IDs(deleteData.Pks),
				Timestamps:   deleteData.Tss,
			})
			if err != nil {
				log.Warn("failed to apply delete when LoadSegment", zap.Error(err))
				return err
			}
		}
	}

	// add candidates after load success
	for _, candidate := range candidates {
		log.Info("register sealed segment bfs into pko candidates",
			zap.Int64("segmentID", candidate.ID()),
		)
		sd.pkOracle.Register(candidate, targetNodeID)
	}
	log.Info("load delete done")

	return nil
}

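// readDeleteFromMsgstream seeks the physical channel to the given position and
// replays delete messages that may hit the candidate segment until safeTs is reached.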
func (sd *shardDelegator) readDeleteFromMsgstream(ctx context.Context, position *msgpb.MsgPosition, safeTs uint64, candidate *pkoracle.BloomFilterSet) (*storage.DeleteData, error) {
	log := sd.getLogger(ctx).With(
		zap.String("channel", position.ChannelName),
		zap.Int64("segmentID", candidate.ID()),
	)
	stream, err := sd.factory.NewTtMsgStream(ctx)
	if err != nil {
		return nil, err
	}
	defer stream.Close()
	vchannelName := position.ChannelName
	pChannelName := funcutil.ToPhysicalChannel(vchannelName)
	position.ChannelName = pChannelName

	ts, _ := tsoutil.ParseTS(position.Timestamp)

	// randomize the subName in case we try to load the same delta at the same time
	subName := fmt.Sprintf("querynode-delta-loader-%d-%d-%d", paramtable.GetNodeID(), sd.collectionID, rand.Int())
	log.Info("from dml check point load delete", zap.Any("position", position), zap.String("vChannel", vchannelName), zap.String("subName", subName), zap.Time("positionTs", ts))
	err = stream.AsConsumer(context.TODO(), []string{pChannelName}, subName, mqwrapper.SubscriptionPositionUnknown)
	if err != nil {
		return nil, err
	}

	ts = time.Now()
	err = stream.Seek(context.TODO(), []*msgpb.MsgPosition{position})
	if err != nil {
		return nil, err
	}

	result := &storage.DeleteData{}
	hasMore := true
	for hasMore {
		select {
		case <-ctx.Done():
			log.Debug("read delta msg from seek position done", zap.Error(ctx.Err()))
			return nil, ctx.Err()
		case msgPack, ok := <-stream.Chan():
			if !ok {
				err = fmt.Errorf("stream channel closed, pChannelName=%v, msgID=%v", pChannelName, position.GetMsgID())
				log.Warn("fail to read delta msg",
					zap.String("pChannelName", pChannelName),
					zap.Binary("msgID", position.GetMsgID()),
					zap.Error(err),
				)
				return nil, err
			}

			if msgPack == nil {
				continue
			}

			for _, tsMsg := range msgPack.Msgs {
				if tsMsg.Type() == commonpb.MsgType_Delete {
					dmsg := tsMsg.(*msgstream.DeleteMsg)
					if dmsg.CollectionID != sd.collectionID || (dmsg.GetPartitionID() != common.AllPartitionsID && dmsg.GetPartitionID() != candidate.Partition()) {
						continue
					}

					for idx, pk := range storage.ParseIDs2PrimaryKeys(dmsg.GetPrimaryKeys()) {
						if candidate.MayPkExist(pk) {
							result.Pks = append(result.Pks, pk)
							result.Tss = append(result.Tss, dmsg.Timestamps[idx])
						}
					}
				}
			}

			// reach safe ts
			if safeTs <= msgPack.EndPositions[0].GetTimestamp() {
				hasMore = false
			}
		}
	}
	log.Info("successfully read delete from stream", zap.Duration("time spent", time.Since(ts)))
	return result, nil
}

// ReleaseSegments releases segments locally or remotely depending on the target node.
func (sd *shardDelegator) ReleaseSegments(ctx context.Context, req *querypb.ReleaseSegmentsRequest, force bool) error {
	log := sd.getLogger(ctx)

	targetNodeID := req.GetNodeID()
	level0Segments := typeutil.NewSet(lo.Map(sd.segmentManager.GetBy(segments.WithLevel(datapb.SegmentLevel_L0), segments.WithChannel(sd.vchannelName)), func(segment segments.Segment, _ int) int64 {
		return segment.ID()
	})...)
	hasLevel0 := false
	for _, segmentID := range req.GetSegmentIDs() {
		hasLevel0 = level0Segments.Contain(segmentID)
		if hasLevel0 {
			break
		}
	}

	// add common log fields
	log = log.With(
		zap.Int64s("segmentIDs", req.GetSegmentIDs()),
		zap.Int64("nodeID", req.GetNodeID()),
		zap.String("scope", req.GetScope().String()),
		zap.Bool("force", force))

	log.Info("delegator start to release segments")
	// alter distribution first
	var sealed, growing []SegmentEntry
	convertSealed := func(segmentID int64, _ int) SegmentEntry {
		return SegmentEntry{
			SegmentID: segmentID,
			NodeID:    targetNodeID,
		}
	}
	convertGrowing := func(segmentID int64, _ int) SegmentEntry {
		return SegmentEntry{
			SegmentID: segmentID,
		}
	}
	switch req.GetScope() {
	case querypb.DataScope_All:
		sealed = lo.Map(req.GetSegmentIDs(), convertSealed)
		growing = lo.Map(req.GetSegmentIDs(), convertGrowing)
	case querypb.DataScope_Streaming:
		growing = lo.Map(req.GetSegmentIDs(), convertGrowing)
	case querypb.DataScope_Historical:
		sealed = lo.Map(req.GetSegmentIDs(), convertSealed)
	}

	if len(growing) > 0 {
		sd.growingSegmentLock.Lock()
	}
	// when we try to release a segment, add it to the pipeline's exclude list first
	// in case its growing segment gets consumed again
	droppedInfos := lo.SliceToMap(req.GetSegmentIDs(), func(id int64) (int64, uint64) {
		if req.GetCheckpoint() == nil {
			return id, typeutil.MaxTimestamp
		}

		return id, req.GetCheckpoint().GetTimestamp()
	})
	sd.AddExcludedSegments(droppedInfos)

	signal := sd.distribution.RemoveDistributions(sealed, growing)
	// wait for the cleared signal
	<-signal
	if len(sealed) > 0 {
		sd.pkOracle.Remove(
			pkoracle.WithSegmentIDs(lo.Map(sealed, func(entry SegmentEntry, _ int) int64 { return entry.SegmentID })...),
			pkoracle.WithSegmentType(commonpb.SegmentState_Sealed),
			pkoracle.WithWorkerID(targetNodeID),
		)
	}
	if len(growing) > 0 {
		sd.pkOracle.Remove(
			pkoracle.WithSegmentIDs(lo.Map(growing, func(entry SegmentEntry, _ int) int64 { return entry.SegmentID })...),
			pkoracle.WithSegmentType(commonpb.SegmentState_Growing),
		)
	}

	var releaseErr error
	if !force {
		worker, err := sd.workerManager.GetWorker(ctx, targetNodeID)
		if err != nil {
			log.Warn("delegator failed to find worker", zap.Error(err))
			releaseErr = err
		} else {
			req.Base.TargetID = targetNodeID
			err = worker.ReleaseSegments(ctx, req)
			if err != nil {
				log.Warn("worker failed to release segments", zap.Error(err))
				releaseErr = err
			}
		}
	}
	if len(growing) > 0 {
		sd.growingSegmentLock.Unlock()
	}

	if releaseErr != nil {
		return releaseErr
	}

	if hasLevel0 {
		sd.GenerateLevel0DeletionCache()
	}
	partitionsToReload := make([]UniqueID, 0)
	lo.ForEach(req.GetSegmentIDs(), func(segmentID int64, _ int) {
		segment := sd.segmentManager.Get(segmentID)
		if segment != nil {
			partitionsToReload = append(partitionsToReload, segment.Partition())
		}
	})
	if paramtable.Get().QueryNodeCfg.EnableSegmentPrune.GetAsBool() {
		sd.maybeReloadPartitionStats(ctx, partitionsToReload...)
	}
	return nil
}

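// SyncTargetVersion syncs the distribution to the new target version, marking growing
// segments that are already sealed or dropped in the target as redundant, and discards
// delete buffer entries up to the checkpoint timestamp.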
func (sd *shardDelegator) SyncTargetVersion(newVersion int64, growingInTarget []int64,
	sealedInTarget []int64, droppedInTarget []int64, checkpoint *msgpb.MsgPosition,
) {
	growings := sd.segmentManager.GetBy(
		segments.WithType(segments.SegmentTypeGrowing),
		segments.WithChannel(sd.vchannelName),
	)

	sealedSet := typeutil.NewUniqueSet(sealedInTarget...)
	growingSet := typeutil.NewUniqueSet(growingInTarget...)
	droppedSet := typeutil.NewUniqueSet(droppedInTarget...)
	redundantGrowing := typeutil.NewUniqueSet()
	for _, s := range growings {
		if growingSet.Contain(s.ID()) {
			continue
		}

		// sealed segment already exists, make growing segment redundant
		if sealedSet.Contain(s.ID()) {
			redundantGrowing.Insert(s.ID())
		}

		// sealed segment already dropped, make growing segment redundant
		if droppedSet.Contain(s.ID()) {
			redundantGrowing.Insert(s.ID())
		}
	}
	redundantGrowingIDs := redundantGrowing.Collect()
	if len(redundantGrowing) > 0 {
		log.Warn("found redundant growing segments",
			zap.Int64s("growingSegments", redundantGrowingIDs))
	}
	sd.distribution.SyncTargetVersion(newVersion, growingInTarget, sealedInTarget, redundantGrowingIDs)
	sd.deleteBuffer.TryDiscard(checkpoint.GetTimestamp())
}

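// GetTargetVersion returns the current target version of the distribution.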
func (sd *shardDelegator) GetTargetVersion() int64 {
	return sd.distribution.getTargetVersion()
}

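// AddExcludedSegments adds segmentID => timestamp pairs to the exclusion list so that
// data of these segments is no longer consumed by the pipeline.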
func (sd *shardDelegator) AddExcludedSegments(excludeInfo map[int64]uint64) {
	sd.excludedSegments.Insert(excludeInfo)
}

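// VerifyExcludedSegments reports whether the segment is still valid at the given
// timestamp, i.e. it has not been excluded.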
func (sd *shardDelegator) VerifyExcludedSegments(segmentID int64, ts uint64) bool {
	return sd.excludedSegments.Verify(segmentID, ts)
}

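// TryCleanExcludedSegments cleans up exclusion entries that are no longer valid
// once the given timestamp has passed.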
func (sd *shardDelegator) TryCleanExcludedSegments(ts uint64) {
	if sd.excludedSegments.ShouldClean() {
		sd.excludedSegments.CleanInvalid(ts)
	}
}