package syncmgr

import (
	"context"

	"github.com/cockroachdb/errors"
	"github.com/samber/lo"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/internal/datanode/broker"
	"github.com/milvus-io/milvus/internal/datanode/metacache"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/commonpbutil"
	"github.com/milvus-io/milvus/pkg/util/merr"
	"github.com/milvus-io/milvus/pkg/util/paramtable"
	"github.com/milvus-io/milvus/pkg/util/retry"
)

// MetaWriter is the interface for SyncManager to write segment sync meta.
type MetaWriter interface {
	UpdateSync(*SyncTask) error
	UpdateSyncV2(*SyncTaskV2) error
	DropChannel(string) error
}
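
// brokerMetaWriter is the broker-backed MetaWriter: it persists segment sync
// meta by reporting it through the datanode broker, wrapping each RPC in
// retry.Do with the configured retry options.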
type brokerMetaWriter struct {
	broker broker.Broker
	opts   []retry.Option
}
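
// BrokerMetaWriter creates a MetaWriter backed by the given broker. A minimal
// usage sketch (the retry option value shown is illustrative, not a
// prescribed default):
//
//	writer := BrokerMetaWriter(b, retry.Attempts(20))
//	if err := writer.UpdateSync(task); err != nil {
//		// handle meta write failure
//	}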
func BrokerMetaWriter(broker broker.Broker, opts ...retry.Option) MetaWriter {
	return &brokerMetaWriter{
		broker: broker,
		opts:   opts,
	}
}

func (b *brokerMetaWriter) UpdateSync(pack *SyncTask) error {
	var (
		checkPoints       = []*datapb.CheckPoint{}
		deltaFieldBinlogs = []*datapb.FieldBinlog{}
	)

	insertFieldBinlogs := lo.MapToSlice(pack.insertBinlogs, func(_ int64, fieldBinlog *datapb.FieldBinlog) *datapb.FieldBinlog { return fieldBinlog })
	statsFieldBinlogs := lo.MapToSlice(pack.statsBinlogs, func(_ int64, fieldBinlog *datapb.FieldBinlog) *datapb.FieldBinlog { return fieldBinlog })
	if len(pack.deltaBinlog.Binlogs) > 0 {
		deltaFieldBinlogs = append(deltaFieldBinlogs, pack.deltaBinlog)
	}

	// Only report the checkpoint of the segment being synced.
	segments := pack.metacache.GetSegmentsBy(metacache.WithSegmentIDs(pack.segmentID))
	if len(segments) == 0 {
		return merr.WrapErrSegmentNotFound(pack.segmentID)
	}
	segment := segments[0]
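	// The reported row count is the rows already flushed plus the rows carried
	// by this task's batch.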
	checkPoints = append(checkPoints, &datapb.CheckPoint{
		SegmentID: pack.segmentID,
		NumOfRows: segment.FlushedRows() + pack.batchSize,
		Position:  pack.checkpoint,
	})
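
	// Piggyback the start positions of segments not yet recorded; they are
	// marked as recorded below once the save call succeeds.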
	startPos := lo.Map(pack.metacache.GetSegmentsBy(metacache.WithStartPosNotRecorded()), func(info *metacache.SegmentInfo, _ int) *datapb.SegmentStartPosition {
		return &datapb.SegmentStartPosition{
			SegmentID:     info.SegmentID(),
			StartPosition: info.StartPosition(),
		}
	})
	getBinlogNum := func(fBinlog *datapb.FieldBinlog) int { return len(fBinlog.GetBinlogs()) }
	log.Info("SaveBinlogPath",
		zap.Int64("SegmentID", pack.segmentID),
		zap.Int64("CollectionID", pack.collectionID),
		zap.Int64("PartitionID", pack.partitionID),
		zap.Any("startPos", startPos),
		zap.Any("checkPoints", checkPoints),
		zap.Int("binlogNum", lo.SumBy(insertFieldBinlogs, getBinlogNum)),
		zap.Int("statslogNum", lo.SumBy(statsFieldBinlogs, getBinlogNum)),
		zap.Int("deltalogNum", lo.SumBy(deltaFieldBinlogs, getBinlogNum)),
		zap.String("vChannelName", pack.channelName),
	)

	req := &datapb.SaveBinlogPathsRequest{
		Base: commonpbutil.NewMsgBase(
			commonpbutil.WithMsgType(0),
			commonpbutil.WithMsgID(0),
			commonpbutil.WithSourceID(paramtable.GetNodeID()),
		),
		SegmentID:           pack.segmentID,
		CollectionID:        pack.collectionID,
		PartitionID:         pack.partitionID,
		Field2BinlogPaths:   insertFieldBinlogs,
		Field2StatslogPaths: statsFieldBinlogs,
		Deltalogs:           deltaFieldBinlogs,

		CheckPoints: checkPoints,

		StartPositions: startPos,
		Flushed:        pack.isFlush,
		Dropped:        pack.isDrop,
		Channel:        pack.channelName,
		SegLevel:       pack.level,
	}
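	// Save the binlog paths with retry; non-retryable meta errors are
	// swallowed inside the closure so retry.Do stops immediately.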
	err := retry.Do(context.Background(), func() error {
		err := b.broker.SaveBinlogPaths(context.Background(), req)
		// Segment not found during a stale segment flush: the segment may have
		// been compacted already. Stop retrying and proceed, ignoring this error.
		if !pack.isFlush && errors.Is(err, merr.ErrSegmentNotFound) {
			log.Warn("stale segment not found, could be compacted",
				zap.Int64("segmentID", pack.segmentID))
			log.Warn("failed to SaveBinlogPaths",
				zap.Int64("segmentID", pack.segmentID),
				zap.Error(err))
			return nil
		}
		// Meta error: this datanode is handling a virtual channel that no
		// longer belongs to it; skip the sync so the channel can be dropped.
		if errors.IsAny(err, merr.ErrSegmentNotFound, merr.ErrChannelNotFound) {
			log.Warn("meta error found, skip sync and start to drop virtual channel", zap.String("channel", pack.channelName))
			return nil
		}
		return err
	}, b.opts...)
	if err != nil {
		log.Warn("failed to SaveBinlogPaths",
			zap.Int64("segmentID", pack.segmentID),
			zap.Error(err))
		return err
	}
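
	// Mark the reported start positions as recorded so they are not sent again.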
	pack.metacache.UpdateSegments(metacache.SetStartPosRecorded(true), metacache.WithSegmentIDs(lo.Map(startPos, func(pos *datapb.SegmentStartPosition, _ int) int64 { return pos.GetSegmentID() })...))

	return nil
}
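
// UpdateSyncV2 reports sync meta for the storage v2 sync path; it mirrors
// UpdateSync but carries a storage version instead of binlog paths.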
func (b *brokerMetaWriter) UpdateSyncV2(pack *SyncTaskV2) error {
	checkPoints := []*datapb.CheckPoint{}

	// Only report the checkpoint of the segment being synced.
	segments := pack.metacache.GetSegmentsBy(metacache.WithSegmentIDs(pack.segmentID))
	if len(segments) == 0 {
		return merr.WrapErrSegmentNotFound(pack.segmentID)
	}
	segment := segments[0]
	checkPoints = append(checkPoints, &datapb.CheckPoint{
		SegmentID: pack.segmentID,
		NumOfRows: segment.FlushedRows() + pack.batchSize,
		Position:  pack.checkpoint,
	})

	startPos := lo.Map(pack.metacache.GetSegmentsBy(metacache.WithStartPosNotRecorded()), func(info *metacache.SegmentInfo, _ int) *datapb.SegmentStartPosition {
		return &datapb.SegmentStartPosition{
			SegmentID:     info.SegmentID(),
			StartPosition: info.StartPosition(),
		}
	})
	log.Info("SaveBinlogPath",
		zap.Int64("SegmentID", pack.segmentID),
		zap.Int64("CollectionID", pack.collectionID),
		zap.Any("startPos", startPos),
		zap.Any("checkPoints", checkPoints),
		zap.String("vChannelName", pack.channelName),
	)

	req := &datapb.SaveBinlogPathsRequest{
		Base: commonpbutil.NewMsgBase(
			commonpbutil.WithSourceID(paramtable.GetNodeID()),
		),
		SegmentID:    pack.segmentID,
		CollectionID: pack.collectionID,

		CheckPoints:    checkPoints,
		StorageVersion: pack.storageVersion,

		StartPositions: startPos,
		Flushed:        pack.isFlush,
		Dropped:        pack.isDrop,
		Channel:        pack.channelName,
	}
	err := retry.Do(context.Background(), func() error {
		err := b.broker.SaveBinlogPaths(context.Background(), req)
		// Segment not found during a stale segment flush: the segment may have
		// been compacted already. Stop retrying and proceed, ignoring this error.
		if !pack.isFlush && errors.Is(err, merr.ErrSegmentNotFound) {
			log.Warn("stale segment not found, could be compacted",
				zap.Int64("segmentID", pack.segmentID))
			log.Warn("failed to SaveBinlogPaths",
				zap.Int64("segmentID", pack.segmentID),
				zap.Error(err))
			return nil
		}
		// Meta error: this datanode is handling a virtual channel that no
		// longer belongs to it; skip the sync so the channel can be dropped.
		if errors.IsAny(err, merr.ErrSegmentNotFound, merr.ErrChannelNotFound) {
			log.Warn("meta error found, skip sync and start to drop virtual channel", zap.String("channel", pack.channelName))
			return nil
		}
		return err
	}, b.opts...)
	if err != nil {
		log.Warn("failed to SaveBinlogPaths",
			zap.Int64("segmentID", pack.segmentID),
			zap.Error(err))
	}
	return err
}
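
// DropChannel issues a DropVirtualChannel request for the given channel
// through the broker, retrying with the writer's retry options.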
func (b *brokerMetaWriter) DropChannel(channelName string) error {
	err := retry.Do(context.Background(), func() error {
		status, err := b.broker.DropVirtualChannel(context.Background(), &datapb.DropVirtualChannelRequest{
			Base: commonpbutil.NewMsgBase(
				commonpbutil.WithSourceID(paramtable.GetNodeID()),
			),
			ChannelName: channelName,
		})
		return merr.CheckRPCCall(status, err)
	}, b.opts...)
	if err != nil {
		log.Warn("failed to DropChannel",
			zap.String("channel", channelName),
			zap.Error(err))
	}
	return err
}