// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datanode

import (
	"context"
	"fmt"
	"math"
	"reflect"
	"sync"

	"github.com/golang/protobuf/proto"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/atomic"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/msgpb"
	"github.com/milvus-io/milvus/internal/datanode/allocator"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/metrics"
	"github.com/milvus-io/milvus/pkg/mq/msgstream"
	"github.com/milvus-io/milvus/pkg/util/commonpbutil"
	"github.com/milvus-io/milvus/pkg/util/funcutil"
	"github.com/milvus-io/milvus/pkg/util/paramtable"
	"github.com/milvus-io/milvus/pkg/util/retry"
	"github.com/milvus-io/milvus/pkg/util/tsoutil"
)
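
// insertBufferNode is the flow graph node that buffers insert messages per
// segment in memory, triggers sync (flush) of segment buffers when needed,
// and forwards timetick and delete messages downstream.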
type insertBufferNode struct {
	BaseNode

	ctx              context.Context
	channelName      string
	delBufferManager *DelBufferManager // manager of delete msg
	channel          Channel
	idAllocator      allocator.Allocator

	flushMap         sync.Map
	flushChan        <-chan flushMsg
	resendTTChan     <-chan resendTTMsg
	flushingSegCache *Cache
	flushManager     flushManager

	timeTickStream msgstream.MsgStream
	ttLogger       *timeTickLogger
	ttMerger       *mergedTimeTickerSender

	lastTimestamp Timestamp
}
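
// timeTickLogger throttles timetick logging: it remembers the first timestamp
// of the current window and emits one summary line per 1000 ticks.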
type timeTickLogger struct {
	start        atomic.Uint64
	counter      atomic.Int32
	vChannelName string
}
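
// LogTs records one timetick; every 1000 ticks it resets the window and
// prints the window summary in a separate goroutine to keep the hot path cheap.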
func (l *timeTickLogger) LogTs(ts Timestamp) {
	if l.counter.Load() == 0 {
		l.start.Store(ts)
	}
	l.counter.Inc()
	if l.counter.Load() == 1000 {
		min := l.start.Load()
		l.start.Store(ts)
		l.counter.Store(0)
		go l.printLogs(min, ts)
	}
}

func (l *timeTickLogger) printLogs(start, end Timestamp) {
	t1, _ := tsoutil.ParseTS(start)
	t2, _ := tsoutil.ParseTS(end)
	log.Debug("IBN timetick log", zap.Time("from", t1), zap.Time("to", t2), zap.Duration("elapsed", t2.Sub(t1)), zap.Uint64("start", start), zap.Uint64("end", end), zap.String("vChannelName", l.vChannelName))
}
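
// Name returns the display name of this node in the flow graph.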
func (ibNode *insertBufferNode) Name() string {
	return "ibNode-" + ibNode.channelName
}
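
// Close shuts down the merged timetick sender and the timetick stream.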
func (ibNode *insertBufferNode) Close() {
	ibNode.ttMerger.close()

	if ibNode.timeTickStream != nil {
		ibNode.timeTickStream.Close()
	}
}

func (ibNode *insertBufferNode) IsValidInMsg(in []Msg) bool {
	if !ibNode.BaseNode.IsValidInMsg(in) {
		return false
	}
	_, ok := in[0].(*flowGraphMsg)
	if !ok {
		log.Warn("type assertion failed for flowGraphMsg", zap.String("name", reflect.TypeOf(in[0]).Name()))
		return false
	}
	return true
}
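
// Operate buffers the insert messages of one flow graph tick into segment
// buffers, triggers segment sync when needed, writes a timetick, and passes
// delete messages and the segments-to-sync list on to the delete node.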
func (ibNode *insertBufferNode) Operate(in []Msg) []Msg {
	fgMsg := in[0].(*flowGraphMsg)

	// replace pchannel with vchannel
	startPositions := make([]*msgpb.MsgPosition, 0, len(fgMsg.startPositions))
	for idx := range fgMsg.startPositions {
		pos := proto.Clone(fgMsg.startPositions[idx]).(*msgpb.MsgPosition)
		pos.ChannelName = ibNode.channelName
		startPositions = append(startPositions, pos)
	}
	fgMsg.startPositions = startPositions
	endPositions := make([]*msgpb.MsgPosition, 0, len(fgMsg.endPositions))
	for idx := range fgMsg.endPositions {
		pos := proto.Clone(fgMsg.endPositions[idx]).(*msgpb.MsgPosition)
		pos.ChannelName = ibNode.channelName
		endPositions = append(endPositions, pos)
	}
	fgMsg.endPositions = endPositions

	if fgMsg.IsCloseMsg() {
		if len(fgMsg.endPositions) != 0 {
			// try to sync all segments
			segmentsToSync := ibNode.Sync(fgMsg, make([]UniqueID, 0), fgMsg.endPositions[0])
			res := flowGraphMsg{
				deleteMessages: []*msgstream.DeleteMsg{},
				timeRange:      fgMsg.timeRange,
				startPositions: fgMsg.startPositions,
				endPositions:   fgMsg.endPositions,
				segmentsToSync: segmentsToSync,
				dropCollection: fgMsg.dropCollection,
				BaseMsg:        flowgraph.NewBaseMsg(true),
			}
			return []Msg{&res}
		}
		return in
	}

	if fgMsg.dropCollection {
		ibNode.flushManager.startDropping()
	}

	var spans []trace.Span
	for _, msg := range fgMsg.insertMessages {
		ctx, sp := startTracer(msg, "InsertBuffer-Node")
		spans = append(spans, sp)
		msg.SetTraceCtx(ctx)
	}

	defer func() {
		for _, sp := range spans {
			sp.End()
		}
	}()

	if startPositions[0].Timestamp < ibNode.lastTimestamp {
		// the message stream should guarantee that this never happens
		err := fmt.Errorf("insert buffer node consumed old messages, channel = %s, timestamp = %d, lastTimestamp = %d",
			ibNode.channelName, startPositions[0].Timestamp, ibNode.lastTimestamp)
		log.Error(err.Error())
		panic(err)
	}

	ibNode.lastTimestamp = endPositions[0].Timestamp

	// Add segments to channel meta if needed and update segment row numbers.
	seg2Upload, err := ibNode.addSegmentAndUpdateRowNum(fgMsg.insertMessages, startPositions[0], endPositions[0])
	if err != nil {
		// occurs only on a collectionID mismatch, should not happen
		err = fmt.Errorf("update segment states in channel meta wrong, err = %s", err)
		log.Error(err.Error())
		panic(err)
	}

	// insert messages -> buffer
	for _, msg := range fgMsg.insertMessages {
		err := ibNode.bufferInsertMsg(msg, startPositions[0], endPositions[0])
		if err != nil {
			// occurs only when schema info is missing or data is misaligned, should not happen
			err = fmt.Errorf("insertBufferNode msg to buffer failed, err = %s", err)
			log.Error(err.Error())
			panic(err)
		}
	}

	ibNode.updateSegmentsMemorySize(seg2Upload)
	ibNode.DisplayStatistics(seg2Upload)

	segmentsToSync := ibNode.Sync(fgMsg, seg2Upload, endPositions[0])

	ibNode.WriteTimeTick(fgMsg.timeRange.timestampMax, seg2Upload)

	res := flowGraphMsg{
		deleteMessages: fgMsg.deleteMessages,
		timeRange:      fgMsg.timeRange,
		startPositions: fgMsg.startPositions,
		endPositions:   fgMsg.endPositions,
		segmentsToSync: segmentsToSync,
		dropCollection: fgMsg.dropCollection,
	}

	// send delete msg to DeleteNode
	return []Msg{&res}
}
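
// GetBufferIfFull returns the segment's current insert buffer and true when
// the buffer has no effective capacity left, making it a candidate for auto sync.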
func (ibNode *insertBufferNode) GetBufferIfFull(segID UniqueID) (*BufferData, bool) {
	if bd, ok := ibNode.channel.getCurInsertBuffer(segID); ok && bd.effectiveCap() <= 0 {
		return bd, true
	}

	return nil, false
}

// GetBuffer returns the buffer data for a segment, or nil if the segment is not buffered.
func (ibNode *insertBufferNode) GetBuffer(segID UniqueID) *BufferData {
	var buf *BufferData
	if bd, ok := ibNode.channel.getCurInsertBuffer(segID); ok {
		buf = bd
	}
	return buf
}

// CollectSegmentsToSync collects the segment IDs of flush messages sent by DataCoord through flushChan.
func (ibNode *insertBufferNode) CollectSegmentsToSync() (flushedSegments []UniqueID) {
	var (
		maxBatch    = 10
		targetBatch int
	)

	size := len(ibNode.flushChan)
	if size >= maxBatch {
		targetBatch = maxBatch
	} else {
		targetBatch = size
	}

	for i := 1; i <= targetBatch; i++ {
		fmsg := <-ibNode.flushChan
		flushedSegments = append(flushedSegments, fmsg.segmentID)
	}

	if targetBatch > 0 {
		log.Info("(Manual Sync) batch processing flush messages",
			zap.Int("batchSize", targetBatch),
			zap.Int64s("flushedSegments", flushedSegments),
			zap.String("channel", ibNode.channelName),
		)
	}

	return flushedSegments
}

// DisplayStatistics logs the statistic changes of segments in memory.
func (ibNode *insertBufferNode) DisplayStatistics(seg2Upload []UniqueID) {
	// Find and return the smaller input
	min := func(former, latter int) (smaller int) {
		if former <= latter {
			return former
		}
		return latter
	}

	// limit the logging size
	displaySize := min(10, len(seg2Upload))

	for k, segID := range seg2Upload[:displaySize] {
		if bd, ok := ibNode.channel.getCurInsertBuffer(segID); ok {
			log.Info("segment buffer status",
				zap.Int("no.", k),
				zap.Int64("segmentID", segID),
				zap.String("channel", ibNode.channelName),
				zap.Int64("size", bd.size),
				zap.Int64("limit", bd.limit),
				zap.Int64("memorySize", bd.memorySize()))
		}
	}
}

// updateSegmentsMemorySize updates segments' memory size in channel meta.
func (ibNode *insertBufferNode) updateSegmentsMemorySize(seg2Upload []UniqueID) {
	for _, segID := range seg2Upload {
		var memorySize int64
		if buffer, ok := ibNode.channel.getCurInsertBuffer(segID); ok {
			memorySize += buffer.memorySize()
		}
		if buffer, ok := ibNode.channel.getCurDeleteBuffer(segID); ok {
			memorySize += buffer.GetLogSize()
		}
		ibNode.channel.updateSegmentMemorySize(segID, memorySize)
	}
}
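
// syncTask describes one scheduled sync of a segment's buffered data. buffer
// may be nil (a valid state); flushed, dropped and auto record why and how
// the sync was scheduled.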
type syncTask struct {
	buffer    *BufferData
	segmentID UniqueID
	flushed   bool
	dropped   bool
	auto      bool
}
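
// FillInSyncTasks collects sync tasks from all triggers: drop collection (all
// segments), graceful close (segments with data), full insert buffers (auto
// sync), delete buffer memory pressure, stale segments, manual flush messages
// from DataCoord, and dropped partitions. Tasks for the same segment ID are
// merged rather than duplicated.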
func (ibNode *insertBufferNode) FillInSyncTasks(fgMsg *flowGraphMsg, seg2Upload []UniqueID) map[UniqueID]*syncTask {
	var syncTasks = make(map[UniqueID]*syncTask)

	if fgMsg.dropCollection {
		// All segments in the collection will be synced, no matter whether their buffers are empty or not
		segmentIDs := ibNode.channel.listAllSegmentIDs()
		log.Info("Receive drop collection request and syncing all segments",
			zap.Int64s("segments", segmentIDs),
			zap.String("channel", ibNode.channelName),
		)

		for _, segID := range segmentIDs {
			buf := ibNode.GetBuffer(segID)
			syncTasks[segID] = &syncTask{
				buffer:    buf, // nil is valid
				segmentID: segID,
				flushed:   false,
				dropped:   true,
			}
		}
		return syncTasks
	}

	if fgMsg.IsCloseMsg() {
		// All segments in the collection will be synced, no matter whether their buffers are empty or not
		segmentIDs := ibNode.channel.listAllSegmentIDs()
		log.Info("Receive close request and syncing all segments",
			zap.Int64s("segments", segmentIDs),
			zap.String("channel", ibNode.channelName),
		)

		for _, segID := range segmentIDs {
			// if the segment has insert or delete data, force a sync
			insertBuf, hasInsert := ibNode.channel.getCurInsertBuffer(segID)
			deleteEntry := ibNode.delBufferManager.GetEntriesNum(segID)
			// if the insert buf or delete buf is not empty, trigger sync
			if (hasInsert && insertBuf.size > 0) || (deleteEntry > 0) {
				syncTasks[segID] = &syncTask{
					buffer:    insertBuf, // nil is valid
					segmentID: segID,
					flushed:   false,
					dropped:   false,
					auto:      true,
				}
			}
		}
		return syncTasks
	}

	// Auto Sync // TODO: move to segment_sync_policy
	for _, segID := range seg2Upload {
		if ibuffer, ok := ibNode.GetBufferIfFull(segID); ok {
			log.Info("(Auto Sync)",
				zap.Int64("segmentID", segID),
				zap.Int64("numRows", ibuffer.size),
				zap.Int64("limit", ibuffer.limit),
				zap.String("channel", ibNode.channelName))

			syncTasks[segID] = &syncTask{
				buffer:    ibuffer,
				segmentID: segID,
				flushed:   false,
				dropped:   false,
				auto:      true,
			}
		}
	}

	// Sync delete.
	// Here we adopt a quite radical strategy: every time memory usage reaches a
	// certain level, we make sure the N biggest delDataBufs can be flushed, so
	// that the memory consumed by the delete buffer never exceeds its limit.
	segmentsToFlush := ibNode.delBufferManager.ShouldFlushSegments()
	for _, segID := range segmentsToFlush {
		syncTasks[segID] = &syncTask{
			buffer:    nil, // nil is valid
			segmentID: segID,
		}
	}

	syncSegmentIDs := ibNode.channel.listSegmentIDsToSync(fgMsg.endPositions[0].Timestamp)
	for _, segID := range syncSegmentIDs {
		buf := ibNode.GetBuffer(segID)
		syncTasks[segID] = &syncTask{
			buffer:    buf, // nil is valid
			segmentID: segID,
		}
	}
	if len(syncSegmentIDs) > 0 {
		log.Info("sync segments", zap.String("vChannel", ibNode.channelName),
			zap.Int64s("segIDs", syncSegmentIDs)) // TODO: maybe too many prints here
	}

	mergeSyncTask := func(segmentIDs []UniqueID, syncTasks map[UniqueID]*syncTask, setupTask func(task *syncTask)) {
		// Merge auto & manual sync tasks with the same segment ID.
		for _, segmentID := range segmentIDs {
			if task, ok := syncTasks[segmentID]; ok {
				setupTask(task)
				log.Info("merging sync task, updating flushed flag",
					zap.Int64("segmentID", segmentID),
					zap.Bool("flushed", task.flushed),
					zap.Bool("dropped", task.dropped),
				)
				continue
			}

			buf := ibNode.GetBuffer(segmentID)
			task := syncTask{
				buffer:    buf, // nil is valid
				segmentID: segmentID,
			}
			setupTask(&task)
			syncTasks[segmentID] = &task
		}
	}
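
	// For example (segment IDs are illustrative), if segment 100 already has
	// an auto sync task and also appears in a manual flush message,
	// mergeSyncTask upgrades the existing task in place instead of adding a
	// duplicate:
	//
	//	syncTasks[100] = &syncTask{segmentID: 100, auto: true}
	//	mergeSyncTask([]UniqueID{100}, syncTasks, func(t *syncTask) { t.flushed = true })
	//	// syncTasks[100] is now {segmentID: 100, auto: true, flushed: true}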

	flushedSegments := ibNode.CollectSegmentsToSync()
	mergeSyncTask(flushedSegments, syncTasks, func(task *syncTask) {
		task.flushed = true
	})
	mergeSyncTask(syncSegmentIDs, syncTasks, func(task *syncTask) {})

	// process drop partition
	for _, partitionDrop := range fgMsg.dropPartitions {
		segmentIDs := ibNode.channel.listPartitionSegments(partitionDrop)
		log.Info("(Drop Partition) syncing all segments in the partition",
			zap.Int64("collectionID", ibNode.channel.getCollectionID()),
			zap.Int64("partitionID", partitionDrop),
			zap.Int64s("segmentIDs", segmentIDs),
			zap.String("channel", ibNode.channelName),
		)
		mergeSyncTask(segmentIDs, syncTasks, func(task *syncTask) {
			task.flushed = true
			task.dropped = true
		})
	}
	return syncTasks
}
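
// Sync runs the collected sync tasks: it flushes each task's buffer through
// the flush manager (with retry), rolls the insert buffer and PK stats on
// success, and returns the IDs of the segments that were synced.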
func (ibNode *insertBufferNode) Sync(fgMsg *flowGraphMsg, seg2Upload []UniqueID, endPosition *msgpb.MsgPosition) []UniqueID {
	syncTasks := ibNode.FillInSyncTasks(fgMsg, seg2Upload)
	segmentsToSync := make([]UniqueID, 0, len(syncTasks))
	ibNode.channel.(*ChannelMeta).needToSync.Store(false)

	for _, task := range syncTasks {
		log.Info("insertBufferNode syncing BufferData",
			zap.Int64("segmentID", task.segmentID),
			zap.Bool("flushed", task.flushed),
			zap.Bool("dropped", task.dropped),
			zap.Bool("auto", task.auto),
			zap.Any("position", endPosition),
			zap.String("channel", ibNode.channelName),
		)
		// use the flushed pk stats to update the current stats
		var pkStats []*storage.PrimaryKeyStats
		// TODO: this has to be an async flush, no need to block here.
		err := retry.Do(ibNode.ctx, func() error {
			statBlobs, err := ibNode.flushManager.flushBufferData(task.buffer,
				task.segmentID,
				task.flushed,
				task.dropped,
				endPosition)
			if err != nil {
				return err
			}
			pkStats, err = storage.DeserializeStats(statBlobs)
			if err != nil {
				log.Warn("failed to deserialize bloom filter files", zap.Error(err))
				return err
			}
			return nil
		}, getFlowGraphRetryOpt())
		if err != nil {
			metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
			metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
			if task.auto {
				metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.FailLabel).Inc()
				metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
			}
			err = fmt.Errorf("insertBufferNode flushBufferData failed, err = %s", err)
			log.Error(err.Error())
			panic(err)
		}
		segmentsToSync = append(segmentsToSync, task.segmentID)
		ibNode.channel.rollInsertBuffer(task.segmentID)
		ibNode.channel.RollPKstats(task.segmentID, pkStats)
		ibNode.channel.setSegmentLastSyncTs(task.segmentID, endPosition.GetTimestamp())
		metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SuccessLabel).Inc()
		metrics.DataNodeFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
		if task.auto {
			metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.TotalLabel).Inc()
			metrics.DataNodeAutoFlushBufferCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.SuccessLabel).Inc()
		}
	}
	return segmentsToSync
}

// addSegmentAndUpdateRowNum updates the row numbers in channel meta for the segments in insertMsgs.
//
// If a segment doesn't exist, a new segment is created.
// The segment row numbers are updated in memory, waiting to be uploaded to DataCoord.
func (ibNode *insertBufferNode) addSegmentAndUpdateRowNum(insertMsgs []*msgstream.InsertMsg, startPos, endPos *msgpb.MsgPosition) (seg2Upload []UniqueID, err error) {
	uniqueSeg := make(map[UniqueID]int64)
	for _, msg := range insertMsgs {
		currentSegID := msg.GetSegmentID()
		collID := msg.GetCollectionID()
		partitionID := msg.GetPartitionID()

		if !ibNode.channel.hasSegment(currentSegID, true) {
			err = ibNode.channel.addSegment(
				addSegmentReq{
					segType:     datapb.SegmentType_New,
					segID:       currentSegID,
					collID:      collID,
					partitionID: partitionID,
					startPos:    startPos,
					endPos:      endPos,
				})
			if err != nil {
				log.Warn("add segment wrong",
					zap.Int64("segID", currentSegID),
					zap.Int64("collID", collID),
					zap.Int64("partID", partitionID),
					zap.String("chanName", msg.GetShardName()),
					zap.Error(err))
				return
			}
		}

		segNum := uniqueSeg[currentSegID]
		uniqueSeg[currentSegID] = segNum + int64(len(msg.RowIDs))
	}

	seg2Upload = make([]UniqueID, 0, len(uniqueSeg))
	for id, num := range uniqueSeg {
		seg2Upload = append(seg2Upload, id)
		ibNode.channel.updateSegmentRowNumber(id, num)
	}

	return
}

/* #nosec G103 */
// bufferInsertMsg puts an InsertMsg into the buffer:
// 1.1 fetch the related schema from channel meta
// 1.2 get the buffer data and put data into each field buffer
// 1.3 put the buffer back
// 1.4 update related statistics
func (ibNode *insertBufferNode) bufferInsertMsg(msg *msgstream.InsertMsg, startPos, endPos *msgpb.MsgPosition) error {
	if err := msg.CheckAligned(); err != nil {
		return err
	}
	currentSegID := msg.GetSegmentID()
	collectionID := msg.GetCollectionID()

	collSchema, err := ibNode.channel.getCollectionSchema(collectionID, msg.EndTs())
	if err != nil {
		log.Warn("Get schema wrong:", zap.Error(err))
		return err
	}

	// load or store insertBuffer
	var buffer *BufferData
	var loaded bool
	buffer, loaded = ibNode.channel.getCurInsertBuffer(currentSegID)
	if !loaded {
		buffer, err = newBufferData(collSchema)
		if err != nil {
			return fmt.Errorf("newBufferData failed, segment=%d, channel=%s, err=%w", currentSegID, ibNode.channelName, err)
		}
	}

	addedBuffer, err := storage.InsertMsgToInsertData(msg, collSchema)
	if err != nil {
		log.Warn("failed to transfer insert msg to insert data", zap.Error(err))
		return err
	}

	addedPfData, err := storage.GetPkFromInsertData(collSchema, addedBuffer)
	if err != nil {
		log.Warn("no primary field found in insert msg", zap.Error(err))
	} else {
		ibNode.channel.updateSegmentPKRange(currentSegID, addedPfData)
	}

	// There may be significant write amplification under frequent insert requests.
	buffer.buffer = storage.MergeInsertData(buffer.buffer, addedBuffer)

	tsData, err := storage.GetTimestampFromInsertData(addedBuffer)
	if err != nil {
		log.Warn("no timestamp field found in insert msg", zap.Error(err))
		return err
	}

	// update buffer size
	buffer.updateSize(int64(msg.NRows()))
	// update timestamp range and start-end position
	buffer.updateTimeRange(ibNode.getTimestampRange(tsData))
	buffer.updateStartAndEndPosition(startPos, endPos)

	metrics.DataNodeConsumeMsgRowsCount.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), metrics.InsertLabel).Add(float64(len(msg.RowData)))

	// store in buffer
	ibNode.channel.setCurInsertBuffer(currentSegID, buffer)

	return nil
}
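
// getTimestampRange computes the min/max timestamp range covered by tsData.
// For example (values are illustrative), tsData.Data = []int64{5, 3, 9}
// yields TimeRange{timestampMin: 3, timestampMax: 9}.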
func (ibNode *insertBufferNode) getTimestampRange(tsData *storage.Int64FieldData) TimeRange {
	tr := TimeRange{
		timestampMin: math.MaxUint64,
		timestampMax: 0,
	}

	for _, data := range tsData.Data {
		if uint64(data) < tr.timestampMin {
			tr.timestampMin = Timestamp(data)
		}
		if uint64(data) > tr.timestampMax {
			tr.timestampMax = Timestamp(data)
		}
	}
	return tr
}

// WriteTimeTick writes a timetick once insertBufferNode operates.
func (ibNode *insertBufferNode) WriteTimeTick(ts Timestamp, segmentIDs []int64) {
	select {
	case resendTTMsg := <-ibNode.resendTTChan:
		log.Info("resend TT msg received in insertBufferNode",
			zap.Int64s("segmentIDs", resendTTMsg.segmentIDs))
		segmentIDs = append(segmentIDs, resendTTMsg.segmentIDs...)
	default:
	}

	ibNode.ttLogger.LogTs(ts)
	ibNode.ttMerger.bufferTs(ts, segmentIDs)
	rateCol.updateFlowGraphTt(ibNode.channelName, ts)
}
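
// getCollectionandPartitionIDbySegID looks up the collection and partition IDs
// of a segment in channel meta.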
func (ibNode *insertBufferNode) getCollectionandPartitionIDbySegID(segmentID UniqueID) (collID, partitionID UniqueID, err error) {
	return ibNode.channel.getCollectionAndPartitionID(segmentID)
}
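
// newInsertBufferNode creates an insertBufferNode: it wires up the DataCoord
// timetick producer stream and a merged timetick sender that attaches
// per-segment statistics to each DataNodeTtMsg.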
func newInsertBufferNode(ctx context.Context, collID UniqueID, delBufManager *DelBufferManager, flushCh <-chan flushMsg, resendTTCh <-chan resendTTMsg,
	fm flushManager, flushingSegCache *Cache, config *nodeConfig) (*insertBufferNode, error) {
	baseNode := BaseNode{}
	baseNode.SetMaxQueueLength(config.maxQueueLength)
	baseNode.SetMaxParallelism(config.maxParallelism)

	// time tick message stream, produced to the DataCoord time tick channel
	wTt, err := config.msFactory.NewMsgStream(ctx)
	if err != nil {
		return nil, err
	}
	wTt.AsProducer([]string{Params.CommonCfg.DataCoordTimeTick.GetValue()})
	metrics.DataNodeNumProducers.WithLabelValues(fmt.Sprint(paramtable.GetNodeID())).Inc()
	log.Info("datanode AsProducer", zap.String("TimeTickChannelName", Params.CommonCfg.DataCoordTimeTick.GetValue()))
	var wTtMsgStream msgstream.MsgStream = wTt

	mt := newMergedTimeTickerSender(func(ts Timestamp, segmentIDs []int64) error {
		stats := make([]*commonpb.SegmentStats, 0, len(segmentIDs))
		for _, sid := range segmentIDs {
			stat, err := config.channel.getSegmentStatisticsUpdates(sid)
			if err != nil {
				log.Warn("failed to get segment statistics info", zap.Int64("segmentID", sid), zap.Error(err))
				continue
			}
			stats = append(stats, stat)
		}
		msgPack := msgstream.MsgPack{}
		timeTickMsg := msgstream.DataNodeTtMsg{
			BaseMsg: msgstream.BaseMsg{
				BeginTimestamp: ts,
				EndTimestamp:   ts,
				HashValues:     []uint32{0},
			},
			DataNodeTtMsg: msgpb.DataNodeTtMsg{
				Base: commonpbutil.NewMsgBase(
					commonpbutil.WithMsgType(commonpb.MsgType_DataNodeTt),
					commonpbutil.WithMsgID(0),
					commonpbutil.WithTimeStamp(ts),
					commonpbutil.WithSourceID(config.serverID),
				),
				ChannelName:   config.vChannelName,
				Timestamp:     ts,
				SegmentsStats: stats,
			},
		}
		msgPack.Msgs = append(msgPack.Msgs, &timeTickMsg)
		sub := tsoutil.SubByNow(ts)
		pChan := funcutil.ToPhysicalChannel(config.vChannelName)
		metrics.DataNodeProduceTimeTickLag.
			WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), fmt.Sprint(collID), pChan).
			Set(float64(sub))
		return wTtMsgStream.Produce(&msgPack)
	})

	return &insertBufferNode{
		ctx:      ctx,
		BaseNode: baseNode,

		timeTickStream:   wTtMsgStream,
		flushMap:         sync.Map{},
		flushChan:        flushCh,
		resendTTChan:     resendTTCh,
		flushingSegCache: flushingSegCache,
		flushManager:     fm,

		delBufferManager: delBufManager,
		channel:          config.channel,
		idAllocator:      config.allocator,
		channelName:      config.vChannelName,
		ttMerger:         mt,
		ttLogger:         &timeTickLogger{vChannelName: config.vChannelName},
	}, nil
}