package datanode

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"path"
	"strconv"
	"unsafe"

	"go.uber.org/zap"

	"github.com/zilliztech/milvus-distributed/internal/kv"
	miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
	"github.com/zilliztech/milvus-distributed/internal/log"
	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/storage"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)

const (
	CollectionPrefix = "/collection/"
	SegmentPrefix    = "/segment/"
)
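
// insertBufferNode is the flow graph node that buffers insert messages per
// segment and flushes the buffered data as binlogs to MinIO/S3, either
// automatically once a segment's buffer is full or on an explicit flush
// message from the data service.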
type (
	InsertData = storage.InsertData
	Blob       = storage.Blob

	insertBufferNode struct {
		BaseNode
		insertBuffer *insertBuffer
		replica      Replica
		flushMeta    *metaTable

		minIOKV     kv.Base
		minioPrefix string

		idAllocator allocatorInterface

		timeTickStream          msgstream.MsgStream
		segmentStatisticsStream msgstream.MsgStream
		completeFlushStream     msgstream.MsgStream
	}

	insertBuffer struct {
		insertData map[UniqueID]*InsertData // SegmentID to InsertData
		maxSize    int32
	}
)
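
// size returns the number of buffered rows for segmentID, taken as the
// largest NumRows among the segment's buffered vector fields; it returns 0
// if nothing is buffered for the segment.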
func (ib *insertBuffer) size(segmentID UniqueID) int32 {
	if ib.insertData == nil || len(ib.insertData) == 0 {
		return 0
	}
	idata, ok := ib.insertData[segmentID]
	if !ok {
		return 0
	}

	var maxSize int32 = 0
	for _, data := range idata.Data {
		if fdata, ok := data.(*storage.FloatVectorFieldData); ok && int32(fdata.NumRows) > maxSize {
			maxSize = int32(fdata.NumRows)
		}
		if bdata, ok := data.(*storage.BinaryVectorFieldData); ok && int32(bdata.NumRows) > maxSize {
			maxSize = int32(bdata.NumRows)
		}
	}
	return maxSize
}
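
// full reports whether the buffer for segmentID has reached maxSize rows.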
func (ib *insertBuffer) full(segmentID UniqueID) bool {
	return ib.size(segmentID) >= ib.maxSize
}
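
// Name returns the name of this node in the flow graph.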
func (ibNode *insertBufferNode) Name() string {
	return "ibNode"
}
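
// Operate consumes one insertMsg per call and works in three passes:
//  1. register unseen segments and update per-segment row statistics,
//  2. decode each insert message's row data field by field into the
//     in-memory insert buffer, auto-flushing a segment once its buffer
//     is full,
//  3. handle explicit flush messages from the data service, then forward
//     a hard time tick downstream.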
func (ibNode *insertBufferNode) Operate(ctx context.Context, in []Msg) ([]Msg, context.Context) {

	if len(in) != 1 {
		log.Error("Invalid operate message input in insertBufferNode", zap.Int("input length", len(in)))
		// TODO: add error handling
	}

	iMsg, ok := in[0].(*insertMsg)
	if !ok {
		log.Error("type assertion failed for insertMsg")
		// TODO: add error handling
	}
	// Updating segment statistics
	uniqueSeg := make(map[UniqueID]int64)
	for _, msg := range iMsg.insertMessages {
		currentSegID := msg.GetSegmentID()
		collID := msg.GetCollectionID()
		partitionID := msg.GetPartitionID()

		if !ibNode.replica.hasSegment(currentSegID) {
			err := ibNode.replica.addSegment(currentSegID, collID, partitionID, msg.GetChannelID())
			if err != nil {
				log.Error("add segment wrong", zap.Error(err))
			}

			switch {
			case iMsg.startPositions == nil || len(iMsg.startPositions) == 0:
				log.Error("insert Msg StartPosition empty")
			default:
				segment, err := ibNode.replica.getSegmentByID(currentSegID)
				if err != nil {
					log.Error("get segment wrong", zap.Error(err))
					break
				}
				var startPosition *internalpb.MsgPosition = nil
				for _, pos := range iMsg.startPositions {
					if pos.ChannelName == segment.channelName {
						startPosition = pos
					}
				}
				if startPosition == nil {
					log.Error("segment start position not found", zap.String("channel", segment.channelName))
				} else {
					ibNode.replica.setStartPosition(currentSegID, startPosition)
				}
			}
		}

		if !ibNode.flushMeta.hasSegmentFlush(currentSegID) {
			err := ibNode.flushMeta.addSegmentFlush(currentSegID)
			if err != nil {
				log.Error("add segment flush meta wrong", zap.Error(err))
			}
		}

		uniqueSeg[currentSegID] += int64(len(msg.RowIDs))
	}

	segIDs := make([]UniqueID, 0, len(uniqueSeg))
	for id, num := range uniqueSeg {
		segIDs = append(segIDs, id)

		err := ibNode.replica.updateStatistics(id, num)
		if err != nil {
			log.Error("update segment row count wrong", zap.Error(err))
		}
	}

	if len(segIDs) > 0 {
		err := ibNode.updateSegStatistics(segIDs)
		if err != nil {
			log.Error("update segment statistics error", zap.Error(err))
		}
	}

	// iMsg is insertMsg
	// 1. iMsg -> buffer
	for _, msg := range iMsg.insertMessages {
		if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
			log.Error("misaligned messages detected")
			continue
		}
		currentSegID := msg.GetSegmentID()
		collectionID := msg.GetCollectionID()

		idata, ok := ibNode.insertBuffer.insertData[currentSegID]
		if !ok {
			idata = &InsertData{
				Data: make(map[UniqueID]storage.FieldData),
			}
		}

		// 1.1 Get CollectionMeta
		collection, err := ibNode.replica.getCollectionByID(collectionID)
		if err != nil {
			// GOOSE TODO add error handler
			log.Error("get collection meta wrong", zap.Error(err))
			continue
		}

		collSchema := collection.schema
		// 1.2 Get Fields
		pos := 0 // Record position of blob
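		// Row layout sketch (as read by the decoding below, little-endian,
		// row-based): each entry of msg.RowData is one row serialized field
		// after field in schema order. For an illustrative schema of
		// (rowID int64, timestamp int64, vec FloatVector dim=2), a row blob
		// would be laid out as:
		//
		//   [8B rowID][8B timestamp][2*4B vec]
		//
		// pos is the byte offset of the current field within a row blob; it
		// advances once per field, since every row shares the same layout.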
		for _, field := range collSchema.Fields {
			switch field.DataType {
			case schemapb.DataType_FloatVector:
				var dim int
				for _, t := range field.TypeParams {
					if t.Key == "dim" {
						dim, err = strconv.Atoi(t.Value)
						if err != nil {
							log.Error("strconv dim wrong", zap.Error(err))
						}
						break
					}
				}
				if dim <= 0 {
					log.Error("invalid dim")
					continue
					// TODO: add error handling
				}

				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.FloatVectorFieldData{
						NumRows: 0,
						Data:    make([]float32, 0),
						Dim:     dim,
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.FloatVectorFieldData)

				var offset int
				for _, blob := range msg.RowData {
					offset = 0
					for j := 0; j < dim; j++ {
						var v float32
						buf := bytes.NewBuffer(blob.GetValue()[pos+offset:])
						if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
							log.Error("binary.Read float32 wrong", zap.Error(err))
						}
						fieldData.Data = append(fieldData.Data, v)
						offset += int(unsafe.Sizeof(v))
					}
				}
				pos += offset
				fieldData.NumRows += len(msg.RowIDs)

			case schemapb.DataType_BinaryVector:
				var dim int
				for _, t := range field.TypeParams {
					if t.Key == "dim" {
						dim, err = strconv.Atoi(t.Value)
						if err != nil {
							log.Error("strconv dim wrong", zap.Error(err))
						}
						break
					}
				}
				if dim <= 0 {
					log.Error("invalid dim")
					continue
					// TODO: add error handling
				}

				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.BinaryVectorFieldData{
						NumRows: 0,
						Data:    make([]byte, 0),
						Dim:     dim,
					}
				}
				fieldData := idata.Data[field.FieldID].(*storage.BinaryVectorFieldData)

				var offset int
				for _, blob := range msg.RowData {
					bv := blob.GetValue()[pos : pos+(dim/8)]
					fieldData.Data = append(fieldData.Data, bv...)
					offset = len(bv)
				}
				pos += offset
				fieldData.NumRows += len(msg.RowData)

			case schemapb.DataType_Bool:
				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.BoolFieldData{
						NumRows: 0,
						Data:    make([]bool, 0),
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.BoolFieldData)
				var v bool
				for _, blob := range msg.RowData {
					buf := bytes.NewReader(blob.GetValue()[pos:])
					if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
						log.Error("binary.Read bool wrong", zap.Error(err))
					}
					fieldData.Data = append(fieldData.Data, v)
				}
				pos += int(unsafe.Sizeof(v))
				fieldData.NumRows += len(msg.RowIDs)

			case schemapb.DataType_Int8:
				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.Int8FieldData{
						NumRows: 0,
						Data:    make([]int8, 0),
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.Int8FieldData)
				var v int8
				for _, blob := range msg.RowData {
					buf := bytes.NewReader(blob.GetValue()[pos:])
					if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
						log.Error("binary.Read int8 wrong", zap.Error(err))
					}
					fieldData.Data = append(fieldData.Data, v)
				}
				pos += int(unsafe.Sizeof(v))
				fieldData.NumRows += len(msg.RowIDs)

			case schemapb.DataType_Int16:
				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.Int16FieldData{
						NumRows: 0,
						Data:    make([]int16, 0),
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.Int16FieldData)
				var v int16
				for _, blob := range msg.RowData {
					buf := bytes.NewReader(blob.GetValue()[pos:])
					if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
						log.Error("binary.Read int16 wrong", zap.Error(err))
					}
					fieldData.Data = append(fieldData.Data, v)
				}
				pos += int(unsafe.Sizeof(v))
				fieldData.NumRows += len(msg.RowIDs)

			case schemapb.DataType_Int32:
				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.Int32FieldData{
						NumRows: 0,
						Data:    make([]int32, 0),
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.Int32FieldData)
				var v int32
				for _, blob := range msg.RowData {
					buf := bytes.NewReader(blob.GetValue()[pos:])
					if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
						log.Error("binary.Read int32 wrong", zap.Error(err))
					}
					fieldData.Data = append(fieldData.Data, v)
				}
				pos += int(unsafe.Sizeof(v))
				fieldData.NumRows += len(msg.RowIDs)

			case schemapb.DataType_Int64:
				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.Int64FieldData{
						NumRows: 0,
						Data:    make([]int64, 0),
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.Int64FieldData)
				switch field.FieldID {
				case 0: // rowIDs
					fieldData.Data = append(fieldData.Data, msg.RowIDs...)
					fieldData.NumRows += len(msg.RowIDs)
				case 1: // Timestamps
					for _, ts := range msg.Timestamps {
						fieldData.Data = append(fieldData.Data, int64(ts))
					}
					fieldData.NumRows += len(msg.Timestamps)
				default:
					var v int64
					for _, blob := range msg.RowData {
						buf := bytes.NewBuffer(blob.GetValue()[pos:])
						if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
							log.Error("binary.Read int64 wrong", zap.Error(err))
						}
						fieldData.Data = append(fieldData.Data, v)
					}
					pos += int(unsafe.Sizeof(v))
					fieldData.NumRows += len(msg.RowIDs)
				}

			case schemapb.DataType_Float:
				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.FloatFieldData{
						NumRows: 0,
						Data:    make([]float32, 0),
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.FloatFieldData)
				var v float32
				for _, blob := range msg.RowData {
					buf := bytes.NewBuffer(blob.GetValue()[pos:])
					if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
						log.Error("binary.Read float32 wrong", zap.Error(err))
					}
					fieldData.Data = append(fieldData.Data, v)
				}
				pos += int(unsafe.Sizeof(v))
				fieldData.NumRows += len(msg.RowIDs)

			case schemapb.DataType_Double:
				if _, ok := idata.Data[field.FieldID]; !ok {
					idata.Data[field.FieldID] = &storage.DoubleFieldData{
						NumRows: 0,
						Data:    make([]float64, 0),
					}
				}

				fieldData := idata.Data[field.FieldID].(*storage.DoubleFieldData)
				var v float64
				for _, blob := range msg.RowData {
					buf := bytes.NewBuffer(blob.GetValue()[pos:])
					if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
						log.Error("binary.Read float64 wrong", zap.Error(err))
					}
					fieldData.Data = append(fieldData.Data, v)
				}

				pos += int(unsafe.Sizeof(v))
				fieldData.NumRows += len(msg.RowIDs)
			}
		}

		// 1.3 store in buffer
		ibNode.insertBuffer.insertData[currentSegID] = idata

		switch {
		case iMsg.endPositions == nil || len(iMsg.endPositions) == 0:
			log.Error("insert Msg EndPosition empty")
		default:
			segment, err := ibNode.replica.getSegmentByID(currentSegID)
			if err != nil {
				log.Error("get segment wrong", zap.Error(err))
				break
			}
			var endPosition *internalpb.MsgPosition = nil
			for _, ep := range iMsg.endPositions {
				if ep.ChannelName == segment.channelName {
					endPosition = ep
				}
			}
			if endPosition == nil {
				log.Error("segment end position not found", zap.String("channel", segment.channelName))
			}
			ibNode.replica.setEndPosition(currentSegID, endPosition)
		}

		// 1.4 if full
		// 1.4.1 generate binlogs
		if ibNode.insertBuffer.full(currentSegID) {
			log.Debug(". Insert Buffer full, auto flushing ", zap.Int32("num of rows", ibNode.insertBuffer.size(currentSegID)))

			err = ibNode.flushSegment(currentSegID, msg.GetPartitionID(), collection.GetID())
			if err != nil {
				log.Error("flush segment fail", zap.Int64("segmentID", currentSegID), zap.Error(err))
			}
		}
	}

	if len(iMsg.insertMessages) > 0 {
		log.Debug("---insert buffer status---")
		stopSign := 0
		for k := range ibNode.insertBuffer.insertData {
			if stopSign >= 10 {
				log.Debug("......")
				break
			}
			log.Debug("seg buffer status", zap.Int64("segmentID", k), zap.Int32("buffer size", ibNode.insertBuffer.size(k)))
			stopSign++
		}
	}

	// iMsg is Flush() msg from dataservice
	// 1. insertBuffer(not empty) -> binLogs -> minIO/S3
	for _, msg := range iMsg.flushMessages {
		for _, currentSegID := range msg.segmentIDs {
			log.Debug(". Receiving flush message", zap.Int64("segmentID", currentSegID))
			if ibNode.insertBuffer.size(currentSegID) > 0 {
				log.Debug(".. Buffer not empty, flushing ...")
				seg, err := ibNode.replica.getSegmentByID(currentSegID)
				if err != nil {
					log.Error("flush segment fail", zap.Error(err))
					continue
				}

				err = ibNode.flushSegment(currentSegID, seg.partitionID, seg.collectionID)
				if err != nil {
					log.Error("flush segment fail", zap.Int64("segmentID", currentSegID), zap.Error(err))
					continue
				}
			}
			err := ibNode.completeFlush(currentSegID)
			if err != nil {
				log.Error("complete flush wrong", zap.Error(err))
			}
			log.Debug("Flush completed")
		}
	}

	if err := ibNode.writeHardTimeTick(iMsg.timeRange.timestampMax); err != nil {
		log.Error("send hard time tick into pulsar channel failed", zap.Error(err))
	}

	var res Msg = &gcMsg{
		gcRecord:  iMsg.gcRecord,
		timeRange: iMsg.timeRange,
	}

	return []Msg{res}, ctx
}
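
// flushSegment serializes the segment's buffered InsertData into binlog
// blobs via the insert codec, clears the buffer, saves each blob to
// MinIO/S3 under a key built from collection, partition, segment, field,
// and an allocated unique ID, and records the resulting paths in the
// flush meta table.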
func (ibNode *insertBufferNode) flushSegment(segID UniqueID, partitionID UniqueID, collID UniqueID) error {

	collSch, err := ibNode.getCollectionSchemaByID(collID)
	if err != nil {
		return fmt.Errorf("get collection by ID wrong, %v", err)
	}

	collMeta := &etcdpb.CollectionMeta{
		Schema: collSch,
		ID:     collID,
	}

	inCodec := storage.NewInsertCodec(collMeta)

	// buffer data to binlogs
	binLogs, err := inCodec.Serialize(partitionID,
		segID, ibNode.insertBuffer.insertData[segID])

	if err != nil {
		return fmt.Errorf("generate binlog wrong: %v", err)
	}

	// clear buffer
	delete(ibNode.insertBuffer.insertData, segID)
	log.Debug(".. Clearing buffer")

	// 1.5.2 binLogs -> minIO/S3
	collIDStr := strconv.FormatInt(collID, 10)
	partitionIDStr := strconv.FormatInt(partitionID, 10)
	segIDStr := strconv.FormatInt(segID, 10)
	keyPrefix := path.Join(ibNode.minioPrefix, collIDStr, partitionIDStr, segIDStr)

	log.Debug(".. Saving binlogs to MinIO ...", zap.Int("number", len(binLogs)))
	for index, blob := range binLogs {
		uid, err := ibNode.idAllocator.allocID()
		if err != nil {
			return fmt.Errorf("allocate id failed, %v", err)
		}

		key := path.Join(keyPrefix, blob.Key, strconv.FormatInt(uid, 10))
		err = ibNode.minIOKV.Save(key, string(blob.Value))
		if err != nil {
			return fmt.Errorf("save to MinIO failed, %v", err)
		}

		fieldID, err := strconv.ParseInt(blob.Key, 10, 32)
		if err != nil {
			return fmt.Errorf("string to fieldID wrong, %v", err)
		}

		log.Debug("... Appending binlog paths ...", zap.Int("number", index))
		ibNode.flushMeta.AppendSegBinlogPaths(segID, fieldID, []string{key})
	}
	return nil
}
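
// completeFlush marks the segment as flushed in the replica, refreshes its
// statistics, and publishes a SegmentFlushCompletedMsg on the
// complete-flush stream.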
func (ibNode *insertBufferNode) completeFlush(segID UniqueID) error {
	ibNode.replica.setIsFlushed(segID)
	ibNode.updateSegStatistics([]UniqueID{segID})

	msgPack := msgstream.MsgPack{}
	completeFlushMsg := internalpb.SegmentFlushCompletedMsg{
		Base: &commonpb.MsgBase{
			MsgType:   commonpb.MsgType_SegmentFlushDone,
			MsgID:     0, // GOOSE TODO
			Timestamp: 0, // GOOSE TODO
			SourceID:  Params.NodeID,
		},
		SegmentID: segID,
	}
	var msg msgstream.TsMsg = &msgstream.FlushCompletedMsg{
		BaseMsg: msgstream.BaseMsg{
			HashValues: []uint32{0},
		},
		SegmentFlushCompletedMsg: completeFlushMsg,
	}

	msgPack.Msgs = append(msgPack.Msgs, msg)
	return ibNode.completeFlushStream.Produce(context.TODO(), &msgPack)
}
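
// writeHardTimeTick publishes a TimeTickMsg carrying ts on the data node
// time-tick stream, signalling that inserts up to ts have been consumed.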
func (ibNode *insertBufferNode) writeHardTimeTick(ts Timestamp) error {
	msgPack := msgstream.MsgPack{}
	timeTickMsg := msgstream.TimeTickMsg{
		BaseMsg: msgstream.BaseMsg{
			BeginTimestamp: ts,
			EndTimestamp:   ts,
			HashValues:     []uint32{0},
		},
		TimeTickMsg: internalpb.TimeTickMsg{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_TimeTick,
				MsgID:     0,  // GOOSE TODO
				Timestamp: ts, // GOOSE TODO
				SourceID:  Params.NodeID,
			},
		},
	}
	msgPack.Msgs = append(msgPack.Msgs, &timeTickMsg)
	return ibNode.timeTickStream.Produce(context.TODO(), &msgPack)
}
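
// updateSegStatistics collects the latest statistics for the given segments
// from the replica and publishes them as one SegmentStatistics message on
// the segment-statistics stream.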
func (ibNode *insertBufferNode) updateSegStatistics(segIDs []UniqueID) error {
	log.Debug("Updating segments statistics...")
	statsUpdates := make([]*internalpb.SegmentStatisticsUpdates, 0, len(segIDs))
	for _, segID := range segIDs {
		updates, err := ibNode.replica.getSegmentStatisticsUpdates(segID)
		if err != nil {
			log.Error("get segment statistics updates wrong", zap.Int64("segmentID", segID), zap.Error(err))
			continue
		}
		statsUpdates = append(statsUpdates, updates)
	}

	segStats := internalpb.SegmentStatistics{
		Base: &commonpb.MsgBase{
			MsgType:   commonpb.MsgType_SegmentStatistics,
			MsgID:     UniqueID(0),  // GOOSE TODO
			Timestamp: Timestamp(0), // GOOSE TODO
			SourceID:  Params.NodeID,
		},
		SegStats: statsUpdates,
	}

	var msg msgstream.TsMsg = &msgstream.SegmentStatisticsMsg{
		BaseMsg: msgstream.BaseMsg{
			HashValues: []uint32{0}, // GOOSE TODO
		},
		SegmentStatistics: segStats,
	}

	var msgPack = msgstream.MsgPack{
		Msgs: []msgstream.TsMsg{msg},
	}
	return ibNode.segmentStatisticsStream.Produce(context.TODO(), &msgPack)
}
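
// getCollectionSchemaByID looks the collection up in the replica and
// returns its schema.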
func (ibNode *insertBufferNode) getCollectionSchemaByID(collectionID UniqueID) (*schemapb.CollectionSchema, error) {
	ret, err := ibNode.replica.getCollectionByID(collectionID)
	if err != nil {
		return nil, err
	}
	return ret.schema, nil
}
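
// newInsertBufferNode wires up an insertBufferNode: it sizes the flow graph
// queue, creates the in-memory insert buffer, connects to MinIO, and opens
// producer streams for time ticks, segment statistics, and flush-completed
// notifications.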
func newInsertBufferNode(ctx context.Context, flushMeta *metaTable,
	replica Replica, alloc allocatorInterface, factory msgstream.Factory) *insertBufferNode {
	maxQueueLength := Params.FlowGraphMaxQueueLength
	maxParallelism := Params.FlowGraphMaxParallelism

	baseNode := BaseNode{}
	baseNode.SetMaxQueueLength(maxQueueLength)
	baseNode.SetMaxParallelism(maxParallelism)

	maxSize := Params.FlushInsertBufferSize
	iBuffer := &insertBuffer{
		insertData: make(map[UniqueID]*InsertData),
		maxSize:    maxSize,
	}

	// MinIO
	option := &miniokv.Option{
		Address:           Params.MinioAddress,
		AccessKeyID:       Params.MinioAccessKeyID,
		SecretAccessKeyID: Params.MinioSecretAccessKey,
		UseSSL:            Params.MinioUseSSL,
		CreateBucket:      true,
		BucketName:        Params.MinioBucketName,
	}

	minIOKV, err := miniokv.NewMinIOKV(ctx, option)
	if err != nil {
		panic(err)
	}
	minioPrefix := Params.InsertBinlogRootPath

	// input stream, data node time tick
	wTt, _ := factory.NewMsgStream(ctx)
	wTt.AsProducer([]string{Params.TimeTickChannelName})
	log.Debug("datanode AsProducer: " + Params.TimeTickChannelName)
	var wTtMsgStream msgstream.MsgStream = wTt
	wTtMsgStream.Start()

	// update statistics channel
	segS, _ := factory.NewMsgStream(ctx)
	segS.AsProducer([]string{Params.SegmentStatisticsChannelName})
	log.Debug("datanode AsProducer: " + Params.SegmentStatisticsChannelName)
	var segStatisticsMsgStream msgstream.MsgStream = segS
	segStatisticsMsgStream.Start()

	// segment flush completed channel
	cf, _ := factory.NewMsgStream(ctx)
	cf.AsProducer([]string{Params.CompleteFlushChannelName})
	log.Debug("datanode AsProducer: " + Params.CompleteFlushChannelName)
	var completeFlushStream msgstream.MsgStream = cf
	completeFlushStream.Start()

	return &insertBufferNode{
		BaseNode:                baseNode,
		insertBuffer:            iBuffer,
		minIOKV:                 minIOKV,
		minioPrefix:             minioPrefix,
		idAllocator:             alloc,
		timeTickStream:          wTtMsgStream,
		segmentStatisticsStream: segStatisticsMsgStream,
		completeFlushStream:     completeFlushStream,
		replica:                 replica,
		flushMeta:               flushMeta,
	}
}