Add more specific logs (#11724)

Signed-off-by: yangxuan <xuan.yang@zilliz.com>
XuanYang-cn 2021-11-15 15:25:09 +08:00 committed by GitHub
parent 4d58ff2df7
commit f3852c1db0
3 changed files with 63 additions and 55 deletions


@@ -168,12 +168,17 @@ func (dsService *dataSyncService) initNodes(vchanInfo *datapb.VchannelInfo) erro
Position: pack.pos,
})
startPos := dsService.replica.listNewSegmentsStartPositions()
log.Debug("SaveBinlogPath",
zap.Int64("SegmentID", pack.segmentID),
zap.Int64("CollectionID", dsService.collectionID),
zap.Bool("IsFlushed", pack.flushed),
zap.Bool("IsDropped", pack.dropped),
zap.Int("Length of Field2BinlogPaths", len(fieldInsert)),
zap.Int("Length of Field2Stats", len(fieldStats)),
zap.Int("Length of Field2Deltalogs", len(deltaInfos)),
zap.Any("Listed start positions", startPos),
)
req := &datapb.SaveBinlogPathsRequest{
@@ -189,14 +194,15 @@ func (dsService *dataSyncService) initNodes(vchanInfo *datapb.VchannelInfo) erro
Field2StatslogPaths: fieldStats,
Deltalogs: deltaInfos,
CheckPoints: checkPoints,
- StartPositions: dsService.replica.listNewSegmentsStartPositions(),
+ StartPositions: startPos,
Flushed: pack.flushed,
Dropped: pack.dropped,
}
rsp, err := dsService.dataCoord.SaveBinlogPaths(context.Background(), req)
if err != nil {
log.Warn(err.Error())
return fmt.Errorf(err.Error())
}
if rsp.ErrorCode != commonpb.ErrorCode_Success {
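The first file's change is purely about observability: before calling SaveBinlogPaths, the datanode now logs the collection ID, the flushed/dropped flags, the sizes of the binlog, statslog and deltalog lists, and the listed start positions under the single "SaveBinlogPath" event. Below is a minimal, self-contained sketch of that structured-logging style using go.uber.org/zap directly (Milvus goes through its own log wrapper); every value is a made-up stand-in, not real datanode state.

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	// Made-up stand-ins for the values the datanode would hold at this point.
	var (
		segmentID    int64 = 42
		collectionID int64 = 1
		flushed            = true
		dropped            = false
		fieldInsert        = []string{"binlog-a", "binlog-b"}
		fieldStats         = []string{"statslog-a"}
		deltaInfos         []string
		startPos           = []string{"demo-vchannel-0@position-100"}
	)

	// One event with many typed fields, the same shape as the enriched log above.
	logger.Debug("SaveBinlogPath",
		zap.Int64("SegmentID", segmentID),
		zap.Int64("CollectionID", collectionID),
		zap.Bool("IsFlushed", flushed),
		zap.Bool("IsDropped", dropped),
		zap.Int("Length of Field2BinlogPaths", len(fieldInsert)),
		zap.Int("Length of Field2Stats", len(fieldStats)),
		zap.Int("Length of Field2Deltalogs", len(deltaInfos)),
		zap.Any("Listed start positions", startPos),
	)
}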


@@ -58,6 +58,7 @@ type ddNode struct
segID2SegInfo sync.Map // segment ID to *SegmentInfo
flushedSegments []*datapb.SegmentInfo
vchannelName string
deltaMsgStream msgstream.MsgStream
dropMode atomic.Value
@@ -91,7 +92,9 @@ func (ddn *ddNode) Operate(in []Msg) []Msg {
}
if load := ddn.dropMode.Load(); load != nil && load.(bool) {
log.Debug("ddNode in dropMode")
log.Debug("ddNode in dropMode",
zap.String("vchannel name", ddn.vchannelName),
zap.Int64("collection ID", ddn.collectionID))
return []Msg{}
}
@@ -274,6 +277,7 @@ func newDDNode(ctx context.Context, collID UniqueID, vchanInfo *datapb.VchannelI
BaseNode: baseNode,
collectionID: collID,
flushedSegments: fs,
vchannelName: vchanInfo.ChannelName,
deltaMsgStream: deltaMsgStream,
}
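In the ddNode change, the vchannel name is stored on the node at construction time only so that later log lines can say which channel and collection a message refers to. A trimmed-down stand-in of that pattern is sketched below; ddNodeLite and its fields are illustrative, not the Milvus definitions.

package main

import (
	"sync/atomic"

	"go.uber.org/zap"
)

// ddNodeLite is an illustrative stand-in for the datanode's ddNode: it keeps
// the collection ID and vchannel name around purely to give log lines context.
type ddNodeLite struct {
	collectionID int64
	vchannelName string
	dropMode     atomic.Value // stores a bool
}

func (n *ddNodeLite) operate(logger *zap.Logger) {
	if load := n.dropMode.Load(); load != nil && load.(bool) {
		// Same idea as the enriched log in the diff: identify the channel and
		// collection instead of only saying "ddNode in dropMode".
		logger.Debug("ddNode in dropMode",
			zap.String("vchannel name", n.vchannelName),
			zap.Int64("collection ID", n.collectionID))
		return
	}
	logger.Debug("ddNode operating", zap.String("vchannel name", n.vchannelName))
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	node := &ddNodeLite{collectionID: 1, vchannelName: "demo-vchannel-0"}
	node.dropMode.Store(true)
	node.operate(logger)
}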


@@ -279,64 +279,62 @@ func (ibNode *insertBufferNode) Operate(in []Msg) []Msg {
dropped: true,
})
}
- goto flush // Jump over the auto-flush and manual flush procedure
- }
+ } else {
segmentsToFlush = make([]UniqueID, 0, len(seg2Upload)+1) //auto flush number + possible manual flush
flushTaskList = make([]flushTask, 0, len(seg2Upload)+1)
// Auto Flush
for _, segToFlush := range seg2Upload {
// If full, auto flush
if bd, ok := ibNode.insertBuffer.Load(segToFlush); ok && bd.(*BufferData).effectiveCap() <= 0 {
log.Warn("Auto flush", zap.Int64("segment id", segToFlush))
ibuffer := bd.(*BufferData)
flushTaskList = append(flushTaskList, flushTask{
buffer: ibuffer,
segmentID: segToFlush,
flushed: false,
dropped: false,
})
}
}
// Manual Flush
select {
case fmsg := <-ibNode.flushChan:
log.Debug(". Receiving flush message",
zap.Int64("segmentID", fmsg.segmentID),
zap.Int64("collectionID", fmsg.collectionID),
)
// merging auto&manual flush segment same segment id
dup := false
for i, task := range flushTaskList {
if task.segmentID == fmsg.segmentID {
flushTaskList[i].flushed = fmsg.flushed
dup = true
break
}
}
// if merged, skip load buffer and create task
if !dup {
currentSegID := fmsg.segmentID
bd, ok := ibNode.insertBuffer.Load(currentSegID)
var buf *BufferData
if ok {
buf = bd.(*BufferData)
}
flushTaskList = append(flushTaskList, flushTask{
buffer: buf,
segmentID: currentSegID,
flushed: fmsg.flushed,
dropped: false,
})
}
default:
}
+ }
- flush:
for _, task := range flushTaskList {
err := ibNode.flushManager.flushBufferData(task.buffer, task.segmentID, task.flushed, task.dropped, endPositions[0])
if err != nil {
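The large insert-buffer-node hunk above is largely a re-indentation (the goto/label pair gives way to an else block); the flush logic itself stays the same: full buffers are queued for auto flush, and a manual flush request for a segment that already has a queued task is merged into that task by upgrading its flushed flag, instead of being added twice. The sketch below isolates that merge-by-segment-ID step with simplified types; task and mergeManualFlush are illustrative names, not the Milvus flushTask API.

package main

import "fmt"

// task is a reduced stand-in for the datanode's flushTask: just enough fields
// to show how a manual flush is merged into a pending auto-flush task.
type task struct {
	segmentID int64
	flushed   bool // a manual flush marks the segment as flushed
}

// mergeManualFlush records a manual flush for segID: if an auto-flush task for
// the same segment is already queued, it is upgraded in place; otherwise a new
// task is appended.
func mergeManualFlush(tasks []task, segID int64) []task {
	for i := range tasks {
		if tasks[i].segmentID == segID {
			tasks[i].flushed = true // merged: skip creating a duplicate task
			return tasks
		}
	}
	return append(tasks, task{segmentID: segID, flushed: true})
}

func main() {
	// Two segments picked up by auto flush (flushed stays false for those).
	tasks := []task{{segmentID: 7}, {segmentID: 9}}

	tasks = mergeManualFlush(tasks, 9)  // merges into the existing task for segment 9
	tasks = mergeManualFlush(tasks, 11) // nothing pending, so a new task is appended

	for _, t := range tasks {
		fmt.Printf("segment %d flushed=%v\n", t.segmentID, t.flushed)
	}
}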