// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package querynode

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"reflect"
	"sort"
	"strconv"
	"sync"

	"github.com/opentracing/opentracing-go"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/internal/common"
	"github.com/milvus-io/milvus/internal/log"
	"github.com/milvus-io/milvus/internal/mq/msgstream"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/proto/segcorepb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
	"github.com/milvus-io/milvus/internal/util/trace"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

// insertNode is one of the nodes in the query flow graph; it applies insert and
// delete messages to growing segments.
type insertNode struct {
	baseNode
	collectionID UniqueID
	metaReplica  ReplicaInterface // streaming
}

// insertData stores the valid insert data, grouped by segment ID.
type insertData struct {
	insertIDs        map[UniqueID][]int64 // rowIDs
	insertTimestamps map[UniqueID][]Timestamp
	insertRecords    map[UniqueID][]*schemapb.FieldData
	insertOffset     map[UniqueID]int64
	insertPKs        map[UniqueID][]primaryKey // pks
}

// deleteData stores the valid delete data, grouped by segment ID.
type deleteData struct {
	deleteIDs        map[UniqueID][]primaryKey // pks
	deleteTimestamps map[UniqueID][]Timestamp
	deleteOffset     map[UniqueID]int64
}

// Name returns the name of insertNode.
func (iNode *insertNode) Name() string {
	return "iNode"
}

// Operate handles the input messages and applies the insert and delete operations to
// the growing segments.
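// Pipeline: hash insert messages into insertData, reserve row offsets via preInsert,
// then insert concurrently per segment; deletes are filtered through the segments'
// bloom filters, reserved via preDelete, then applied concurrently.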
|
2021-03-25 14:41:46 +08:00
|
|
|
func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
|
2020-11-09 16:27:11 +08:00
|
|
|
if len(in) != 1 {
|
2022-03-09 15:17:59 +08:00
|
|
|
log.Warn("Invalid operate message input in insertNode", zap.Int("input length", len(in)))
|
|
|
|
return []Msg{}
|
2020-11-09 16:27:11 +08:00
|
|
|
}
|
|
|
|
|
2021-02-25 17:35:36 +08:00
|
|
|
iMsg, ok := in[0].(*insertMsg)
|
2020-11-09 16:27:11 +08:00
|
|
|
if !ok {
|
2022-05-07 10:27:51 +08:00
|
|
|
if in[0] == nil {
|
|
|
|
log.Debug("type assertion failed for insertMsg because it's nil")
|
|
|
|
} else {
|
|
|
|
log.Warn("type assertion failed for insertMsg", zap.String("name", reflect.TypeOf(in[0]).Name()))
|
|
|
|
}
|
2022-03-09 15:17:59 +08:00
|
|
|
return []Msg{}
|
2020-11-09 16:27:11 +08:00
|
|
|
}

	iData := insertData{
		insertIDs:        make(map[UniqueID][]int64),
		insertTimestamps: make(map[UniqueID][]Timestamp),
		insertRecords:    make(map[UniqueID][]*schemapb.FieldData),
		insertOffset:     make(map[UniqueID]int64),
		insertPKs:        make(map[UniqueID][]primaryKey),
	}

	if iMsg == nil {
		return []Msg{}
	}

	var spans []opentracing.Span
	for _, msg := range iMsg.insertMessages {
		sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
		spans = append(spans, sp)
		msg.SetTraceCtx(ctx)
	}

	collection, err := iNode.metaReplica.getCollectionByID(iNode.collectionID)
	if err != nil {
		// QueryNode should have added the collection before starting the flow graph
		panic(fmt.Errorf("%s getCollectionByID failed, collectionID = %d", iNode.Name(), iNode.collectionID))
	}
	collection.RLock()
	defer collection.RUnlock()

	// 1. hash insertMessages to insertData
	// Sorting the messages by timestamp ensures that the data in iData.insertRecords is
	// in ascending timestamp order, which avoids a re-sort (and the data copy it would
	// require) in segcore.
	sort.Slice(iMsg.insertMessages, func(i, j int) bool {
		return iMsg.insertMessages[i].BeginTs() < iMsg.insertMessages[j].BeginTs()
	})
	for _, insertMsg := range iMsg.insertMessages {
		// if loadType is loadCollection, check if the partition exists; if not, create it
		if collection.getLoadType() == loadTypeCollection {
			err = iNode.metaReplica.addPartition(insertMsg.CollectionID, insertMsg.PartitionID)
			if err != nil {
				// occurs only when the collection cannot be found, should not happen
				err = fmt.Errorf("insertNode addPartition failed, err = %s", err)
				log.Error(err.Error())
				panic(err)
			}
		}

		// check if the segment exists; if not, create it
		has, err := iNode.metaReplica.hasSegment(insertMsg.SegmentID, segmentTypeGrowing)
		if err != nil {
			log.Error(err.Error()) // should never happen
			panic(err)
		}
		if !has {
			err = iNode.metaReplica.addSegment(insertMsg.SegmentID, insertMsg.PartitionID, insertMsg.CollectionID, insertMsg.ShardName, segmentTypeGrowing)
			if err != nil {
				// occurs when the collection or partition cannot be found; both should have been created above
				err = fmt.Errorf("insertNode addSegment failed, err = %s", err)
				log.Error(err.Error())
				panic(err)
			}
		}

		insertRecord, err := storage.TransferInsertMsgToInsertRecord(collection.schema, insertMsg)
		if err != nil {
			// occurs only when the schema lacks the dim param, should not happen
			err = fmt.Errorf("failed to transfer msgStream.insertMsg to storage.InsertRecord, err = %s", err)
			log.Error(err.Error())
			panic(err)
		}

		iData.insertIDs[insertMsg.SegmentID] = append(iData.insertIDs[insertMsg.SegmentID], insertMsg.RowIDs...)
		iData.insertTimestamps[insertMsg.SegmentID] = append(iData.insertTimestamps[insertMsg.SegmentID], insertMsg.Timestamps...)
		if _, ok := iData.insertRecords[insertMsg.SegmentID]; !ok {
			iData.insertRecords[insertMsg.SegmentID] = insertRecord.FieldsData
		} else {
			typeutil.MergeFieldData(iData.insertRecords[insertMsg.SegmentID], insertRecord.FieldsData)
		}
		pks, err := getPrimaryKeys(insertMsg, iNode.metaReplica)
		if err != nil {
			// occurs when the collection cannot be found or the data is misaligned, should not happen
			err = fmt.Errorf("failed to get primary keys, err = %s", err)
			log.Error(err.Error())
			panic(err)
		}
		iData.insertPKs[insertMsg.SegmentID] = append(iData.insertPKs[insertMsg.SegmentID], pks...)
	}

	// 2. do preInsert
	for segmentID := range iData.insertRecords {
		var targetSegment, err = iNode.metaReplica.getSegmentByID(segmentID, segmentTypeGrowing)
		if err != nil {
			// should not happen, the segment was created above
			err = fmt.Errorf("insertNode getSegmentByID failed, err = %s", err)
			log.Error(err.Error())
			panic(err)
		}

		var numOfRecords = len(iData.insertIDs[segmentID])
		if targetSegment != nil {
			offset, err := targetSegment.segmentPreInsert(numOfRecords)
			if err != nil {
				// occurs when the cgo function `PreInsert` fails
				err = fmt.Errorf("segmentPreInsert failed, segmentID = %d, err = %s", segmentID, err)
				log.Error(err.Error())
				panic(err)
			}
			iData.insertOffset[segmentID] = offset
			log.Debug("insertNode operator", zap.Int("insert size", numOfRecords), zap.Int64("insert offset", offset), zap.Int64("segment id", segmentID))
			targetSegment.updateBloomFilter(iData.insertPKs[segmentID])
		}
	}

	// 3. do insert
	wg := sync.WaitGroup{}
	for segmentID := range iData.insertRecords {
		segmentID := segmentID // capture the loop variable for the goroutine
		wg.Add(1)
		go func() {
			err := iNode.insert(&iData, segmentID, &wg)
			if err != nil {
				// occurs when the segment cannot be found or the cgo function `Insert` fails
				err = fmt.Errorf("segment insert failed, segmentID = %d, err = %s", segmentID, err)
				log.Error(err.Error())
				panic(err)
			}
		}()
	}
	wg.Wait()

	delData := &deleteData{
		deleteIDs:        make(map[UniqueID][]primaryKey),
		deleteTimestamps: make(map[UniqueID][]Timestamp),
		deleteOffset:     make(map[UniqueID]int64),
	}
	// 1. filter candidate segments by bloom filter
	for _, delMsg := range iMsg.deleteMessages {
		if iNode.metaReplica.getSegmentNum(segmentTypeGrowing) != 0 {
			log.Debug("delete in streaming replica",
				zap.Any("collectionID", delMsg.CollectionID),
				zap.Any("collectionName", delMsg.CollectionName),
				zap.Int64("numPKs", delMsg.NumRows))
			err := processDeleteMessages(iNode.metaReplica, segmentTypeGrowing, delMsg, delData)
			if err != nil {
				// occurs when meta info is missing or the pk type is unexpected, should not happen
				err = fmt.Errorf("insertNode processDeleteMessages failed, collectionID = %d, err = %s", delMsg.CollectionID, err)
				log.Error(err.Error())
				panic(err)
			}
		}
	}

	// 2. do preDelete
	for segmentID, pks := range delData.deleteIDs {
		segment, err := iNode.metaReplica.getSegmentByID(segmentID, segmentTypeGrowing)
		if err != nil {
			// occurs when the segment cannot be found, should not happen
			err = fmt.Errorf("insertNode getSegmentByID failed, err = %s", err)
			log.Error(err.Error())
			panic(err)
		}
		offset := segment.segmentPreDelete(len(pks))
		delData.deleteOffset[segmentID] = offset
	}

	// 3. do delete
	for segmentID := range delData.deleteOffset {
		segmentID := segmentID // capture the loop variable for the goroutine
		wg.Add(1)
		go func() {
			err := iNode.delete(delData, segmentID, &wg)
			if err != nil {
				// occurs when the segment cannot be found, the cgo function `Delete` fails, etc.
				err = fmt.Errorf("segment delete failed, segmentID = %d, err = %s", segmentID, err)
				log.Error(err.Error())
				panic(err)
			}
		}()
	}
	wg.Wait()

	var res Msg = &serviceTimeMsg{
		timeRange: iMsg.timeRange,
	}
	for _, sp := range spans {
		sp.Finish()
	}

	return []Msg{res}
}

// processDeleteMessages applies a delete message to every segment of the given type
// that may contain the deleted primary keys, accumulating the matches into delData.
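// A PartitionID of -1 means the delete is not scoped to a single partition, so all
// partitions of the collection are searched.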
func processDeleteMessages(replica ReplicaInterface, segType segmentType, msg *msgstream.DeleteMsg, delData *deleteData) error {
	var partitionIDs []UniqueID
	var err error
	if msg.PartitionID != -1 {
		partitionIDs = []UniqueID{msg.PartitionID}
	} else {
		partitionIDs, err = replica.getPartitionIDs(msg.CollectionID)
		if err != nil {
			return err
		}
	}
	resultSegmentIDs := make([]UniqueID, 0)
	for _, partitionID := range partitionIDs {
		segmentIDs, err := replica.getSegmentIDs(partitionID, segType)
		if err != nil {
			return err
		}
		resultSegmentIDs = append(resultSegmentIDs, segmentIDs...)
	}

	primaryKeys := storage.ParseIDs2PrimaryKeys(msg.PrimaryKeys)
	for _, segmentID := range resultSegmentIDs {
		segment, err := replica.getSegmentByID(segmentID, segType)
		if err != nil {
			return err
		}
		pks, tss, err := filterSegmentsByPKs(primaryKeys, msg.Timestamps, segment)
		if err != nil {
			return err
		}
		if len(pks) > 0 {
			delData.deleteIDs[segmentID] = append(delData.deleteIDs[segmentID], pks...)
			delData.deleteTimestamps[segmentID] = append(delData.deleteTimestamps[segmentID], tss...)
		}
	}
	return nil
}

// filterSegmentsByPKs returns the primary keys (and their timestamps) that may exist
// in the given segment, according to the segment's bloom filter.
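// Bloom filters can return false positives, so a returned pk is not guaranteed to be
// present in the segment; segcore resolves the actual rows when the delete is applied.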
func filterSegmentsByPKs(pks []primaryKey, timestamps []Timestamp, segment *Segment) ([]primaryKey, []Timestamp, error) {
	if segment == nil {
		return nil, nil, fmt.Errorf("segment is nil in filterSegmentsByPKs")
	}

	retPks := make([]primaryKey, 0)
	retTss := make([]Timestamp, 0)
	buf := make([]byte, 8)
	for index, pk := range pks {
		exist := false
		switch pk.Type() {
		case schemapb.DataType_Int64:
			int64Pk := pk.(*int64PrimaryKey)
			common.Endian.PutUint64(buf, uint64(int64Pk.Value))
			exist = segment.pkFilter.Test(buf)
		case schemapb.DataType_VarChar:
			varCharPk := pk.(*varCharPrimaryKey)
			exist = segment.pkFilter.TestString(varCharPk.Value)
		default:
			return nil, nil, fmt.Errorf("invalid data type of delete primary keys")
		}
		if exist {
			retPks = append(retPks, pk)
			retTss = append(retTss, timestamps[index])
		}
	}
	return retPks, retTss, nil
}

// insert executes the insert operation for the given growing segment.
func (iNode *insertNode) insert(iData *insertData, segmentID UniqueID, wg *sync.WaitGroup) error {
	defer wg.Done()

	var targetSegment, err = iNode.metaReplica.getSegmentByID(segmentID, segmentTypeGrowing)
	if err != nil {
		return fmt.Errorf("getSegmentByID failed, err = %s", err)
	}

	ids := iData.insertIDs[segmentID]
	timestamps := iData.insertTimestamps[segmentID]
	offsets := iData.insertOffset[segmentID]
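	// offsets is the base row offset reserved by segmentPreInsert; segcore writes the
	// new rows starting at this position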
	insertRecord := &segcorepb.InsertRecord{
		FieldsData: iData.insertRecords[segmentID],
		NumRows:    int64(len(ids)),
	}

	err = targetSegment.segmentInsert(offsets, ids, timestamps, insertRecord)
	if err != nil {
		return fmt.Errorf("segmentInsert failed, segmentID = %d, err = %s", segmentID, err)
	}

	log.Debug("Do insert done", zap.Int("len", len(iData.insertIDs[segmentID])), zap.Int64("collectionID", targetSegment.collectionID), zap.Int64("segmentID", segmentID))
	return nil
}

// delete executes the delete operation for the given growing segment.
func (iNode *insertNode) delete(deleteData *deleteData, segmentID UniqueID, wg *sync.WaitGroup) error {
	defer wg.Done()
	targetSegment, err := iNode.metaReplica.getSegmentByID(segmentID, segmentTypeGrowing)
	if err != nil {
		return fmt.Errorf("getSegmentByID failed, err = %s", err)
	}

	if targetSegment.segmentType != segmentTypeGrowing {
		return fmt.Errorf("unexpected segmentType when delete, segmentType = %s", targetSegment.segmentType.String())
	}

	ids := deleteData.deleteIDs[segmentID]
	timestamps := deleteData.deleteTimestamps[segmentID]
	offset := deleteData.deleteOffset[segmentID]

	err = targetSegment.segmentDelete(offset, ids, timestamps)
	if err != nil {
		return fmt.Errorf("segmentDelete failed, err = %s", err)
	}

	log.Debug("Do delete done", zap.Int("len", len(deleteData.deleteIDs[segmentID])), zap.Int64("segmentID", segmentID))
	return nil
}

// TODO: move this function to a proper file
// getPrimaryKeys extracts the primary keys carried by an insert message.
func getPrimaryKeys(msg *msgstream.InsertMsg, metaReplica ReplicaInterface) ([]primaryKey, error) {
	if err := msg.CheckAligned(); err != nil {
		log.Warn("misaligned messages detected", zap.Error(err))
		return nil, err
	}
	collectionID := msg.GetCollectionID()

	collection, err := metaReplica.getCollectionByID(collectionID)
	if err != nil {
		log.Warn(err.Error())
		return nil, err
	}

	return getPKs(msg, collection.schema)
}
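
// getPKs dispatches on the insert message layout: row-based messages serialize each
// row as a byte blob, while column-based messages carry per-field columnar data.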
func getPKs(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]primaryKey, error) {
	if msg.IsRowBased() {
		return getPKsFromRowBasedInsertMsg(msg, schema)
	}
	return getPKsFromColumnBasedInsertMsg(msg, schema)
}
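// getPKsFromRowBasedInsertMsg walks the schema to compute the byte offset of the
// int64 primary key inside each serialized row, then decodes the pk from every row
// blob. For example, with a hypothetical schema of [Bool, Int32, FloatVector(dim=8)]
// fields before the pk field, the offset is 1 + 4 + 8*4 = 37, so the pk occupies
// bytes [37, 45) of each row.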
func getPKsFromRowBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]primaryKey, error) {
	offset := 0
	for _, field := range schema.Fields {
		if field.IsPrimaryKey {
			break
		}
		switch field.DataType {
		case schemapb.DataType_Bool:
			offset++
		case schemapb.DataType_Int8:
			offset++
		case schemapb.DataType_Int16:
			offset += 2
		case schemapb.DataType_Int32:
			offset += 4
		case schemapb.DataType_Int64:
			offset += 8
		case schemapb.DataType_Float:
			offset += 4
		case schemapb.DataType_Double:
			offset += 8
		case schemapb.DataType_FloatVector:
			for _, t := range field.TypeParams {
				if t.Key == "dim" {
					dim, err := strconv.Atoi(t.Value)
					if err != nil {
						return nil, fmt.Errorf("failed to parse dim from type params, err = %s", err)
					}
					offset += dim * 4
					break
				}
			}
		case schemapb.DataType_BinaryVector:
			for _, t := range field.TypeParams {
				if t.Key == "dim" {
					dim, err := strconv.Atoi(t.Value)
					if err != nil {
						return nil, fmt.Errorf("failed to parse dim from type params, err = %s", err)
					}
					offset += dim / 8
					break
				}
			}
		}
	}

	blobReaders := make([]io.Reader, len(msg.RowData))
	for i, blob := range msg.RowData {
		blobReaders[i] = bytes.NewReader(blob.GetValue()[offset : offset+8])
	}
	pks := make([]primaryKey, len(blobReaders))

	for i, reader := range blobReaders {
		var int64PkValue int64
		err := binary.Read(reader, common.Endian, &int64PkValue)
		if err != nil {
			log.Warn("binary read blob value failed", zap.Error(err))
			return nil, err
		}
		pks[i] = newInt64PrimaryKey(int64PkValue)
	}

	return pks, nil
}
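
// getPKsFromColumnBasedInsertMsg locates the primary key field via the collection
// schema and parses the pks directly from its columnar data.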
func getPKsFromColumnBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]primaryKey, error) {
	primaryFieldSchema, err := typeutil.GetPrimaryFieldSchema(schema)
	if err != nil {
		return nil, err
	}

	primaryFieldData, err := typeutil.GetPrimaryFieldData(msg.GetFieldsData(), primaryFieldSchema)
	if err != nil {
		return nil, err
	}

	pks, err := storage.ParseFieldData2PrimaryKeys(primaryFieldData)
	if err != nil {
		return nil, err
	}

	return pks, nil
}

// newInsertNode returns a new insertNode.
func newInsertNode(metaReplica ReplicaInterface, collectionID UniqueID) *insertNode {
	maxQueueLength := Params.QueryNodeCfg.FlowGraphMaxQueueLength
	maxParallelism := Params.QueryNodeCfg.FlowGraphMaxParallelism

	baseNode := baseNode{}
	baseNode.SetMaxQueueLength(maxQueueLength)
	baseNode.SetMaxParallelism(maxParallelism)

	return &insertNode{
		baseNode:     baseNode,
		collectionID: collectionID,
		metaReplica:  metaReplica,
	}
}