// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

package storage

import (
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"
	"time"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus/internal/log"
	"github.com/milvus-io/milvus/internal/proto/etcdpb"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/rootcoord"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

const (
	// Ts is blob key "ts"
	Ts = "ts"
	// DDL is blob key "ddl"
	DDL = "ddl"
	// IndexParamsKey is blob key "indexParams"
	IndexParamsKey = "indexParams"
)

// When the blob of an index file is too large, we can split the blob into several rows.
// Fortunately, the blob has no semantics that differ from other binlog types,
// so we can assemble these rows back into a whole blob when deserializing the index binlog.
// num rows = math.Ceil(len(blob) / maxLengthPerRowOfIndexFile)
// A past-version index file contains only a single string row, which is a subset of
// splitting into several rows, so splitting the index file does not introduce
// incompatibility with past versions.
const maxLengthPerRowOfIndexFile = 4 * 1024 * 1024
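
// For example (a sketch): a 10 MiB index blob is stored as
// int(math.Ceil(float64(10*1024*1024)/float64(maxLengthPerRowOfIndexFile))) = 3 rows,
// the first two holding 4 MiB each and the last holding the remaining 2 MiB.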

type (
	// UniqueID is type alias of typeutil.UniqueID
	UniqueID = typeutil.UniqueID

	// FieldID represents the identity number of a field in a collection; its type is UniqueID
	FieldID = typeutil.UniqueID

	// Timestamp is type alias of typeutil.Timestamp
	Timestamp = typeutil.Timestamp
)

// InvalidUniqueID is used when the UniqueID is not set (e.g., in returns accompanied by an error)
const InvalidUniqueID = UniqueID(-1)

// Blob is a pack of key&value
type Blob struct {
	Key   string
	Value []byte
}

// BlobList implements sort.Interface for a list of Blob
type BlobList []*Blob

// Len implements Len in sort.Interface
func (s BlobList) Len() int {
	return len(s)
}

// Less implements Less in sort.Interface
func (s BlobList) Less(i, j int) bool {
	leftValues := strings.Split(s[i].Key, "/")
	rightValues := strings.Split(s[j].Key, "/")
	left, _ := strconv.ParseInt(leftValues[len(leftValues)-1], 0, 10)
	right, _ := strconv.ParseInt(rightValues[len(rightValues)-1], 0, 10)
	return left < right
}

// Swap implements Swap in sort.Interface
func (s BlobList) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
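
// Example (a sketch): blob keys end with the log index, so sorting a BlobList
// orders blobs numerically by that trailing segment rather than lexically:
//
//	blobs := []*Blob{{Key: "1/insert_log/1/1/1/100/10"}, {Key: "1/insert_log/1/1/1/100/2"}}
//	sort.Sort(BlobList(blobs)) // ".../2" now precedes ".../10"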

// GetKey returns the key of blob
func (b Blob) GetKey() string {
	return b.Key
}

// GetValue returns the value of blob
func (b Blob) GetValue() []byte {
	return b.Value
}

// FieldData is an abstraction of the in-memory data of one field
type FieldData interface {
	GetMemorySize() int
	RowNum() int
	GetRow(i int) interface{}
}

type BoolFieldData struct {
	NumRows []int64
	Data    []bool
}

type Int8FieldData struct {
	NumRows []int64
	Data    []int8
}

type Int16FieldData struct {
	NumRows []int64
	Data    []int16
}

type Int32FieldData struct {
	NumRows []int64
	Data    []int32
}

type Int64FieldData struct {
	NumRows []int64
	Data    []int64
}

type FloatFieldData struct {
	NumRows []int64
	Data    []float32
}

type DoubleFieldData struct {
	NumRows []int64
	Data    []float64
}

type StringFieldData struct {
	NumRows []int64
	Data    []string
}

type BinaryVectorFieldData struct {
	NumRows []int64
	Data    []byte
	Dim     int
}

type FloatVectorFieldData struct {
	NumRows []int64
	Data    []float32
	Dim     int
}

// RowNum implements FieldData.RowNum
func (data *BoolFieldData) RowNum() int         { return len(data.Data) }
func (data *Int8FieldData) RowNum() int         { return len(data.Data) }
func (data *Int16FieldData) RowNum() int        { return len(data.Data) }
func (data *Int32FieldData) RowNum() int        { return len(data.Data) }
func (data *Int64FieldData) RowNum() int        { return len(data.Data) }
func (data *FloatFieldData) RowNum() int        { return len(data.Data) }
func (data *DoubleFieldData) RowNum() int       { return len(data.Data) }
func (data *StringFieldData) RowNum() int       { return len(data.Data) }
func (data *BinaryVectorFieldData) RowNum() int { return len(data.Data) * 8 / data.Dim }
func (data *FloatVectorFieldData) RowNum() int  { return len(data.Data) / data.Dim }

// GetRow implements FieldData.GetRow
func (data *BoolFieldData) GetRow(i int) interface{}   { return data.Data[i] }
func (data *Int8FieldData) GetRow(i int) interface{}   { return data.Data[i] }
func (data *Int16FieldData) GetRow(i int) interface{}  { return data.Data[i] }
func (data *Int32FieldData) GetRow(i int) interface{}  { return data.Data[i] }
func (data *Int64FieldData) GetRow(i int) interface{}  { return data.Data[i] }
func (data *FloatFieldData) GetRow(i int) interface{}  { return data.Data[i] }
func (data *DoubleFieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *StringFieldData) GetRow(i int) interface{} { return data.Data[i] }
func (data *BinaryVectorFieldData) GetRow(i int) interface{} {
	return data.Data[i*data.Dim/8 : (i+1)*data.Dim/8]
}
func (data *FloatVectorFieldData) GetRow(i int) interface{} {
	return data.Data[i*data.Dim : (i+1)*data.Dim]
}
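
// For example (a sketch): with Dim = 4, a FloatVectorFieldData stores rows
// contiguously, so GetRow(1) returns data.Data[4:8], the second vector; for a
// BinaryVectorFieldData with Dim = 8, each row is one byte and GetRow(1)
// returns data.Data[1:2].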

// Why not call binary.Size(data) directly? Because for these structs it returns -1:
// binary.Size returns how many bytes Write would generate to encode the value v, which
// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data.
// If v is neither of these, binary.Size returns -1.

// GetMemorySize implements FieldData.GetMemorySize
func (data *BoolFieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

// GetMemorySize implements FieldData.GetMemorySize
func (data *Int8FieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

func (data *Int16FieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

func (data *Int32FieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

func (data *Int64FieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

func (data *FloatFieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

func (data *DoubleFieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

func (data *StringFieldData) GetMemorySize() int {
	return binary.Size(data.NumRows) + binary.Size(data.Data)
}

func (data *BinaryVectorFieldData) GetMemorySize() int {
	// Dim is a plain int, which binary.Size cannot measure (it returns -1 for
	// non-fixed-size values), so count it as a fixed-size int64 instead.
	return binary.Size(data.NumRows) + binary.Size(data.Data) + binary.Size(int64(data.Dim))
}

func (data *FloatVectorFieldData) GetMemorySize() int {
	// See the note on BinaryVectorFieldData.GetMemorySize about Dim.
	return binary.Size(data.NumRows) + binary.Size(data.Data) + binary.Size(int64(data.Dim))
}
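
// A worked example (a sketch): an Int64FieldData with three rows of Data and a
// single NumRows entry is counted as binary.Size([]int64{3}) +
// binary.Size([]int64{a, b, c}) = 8 + 24 = 32 bytes.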

// system field id:
// 0: unique row id
// 1: timestamp
// 100: first user field id
// 101: second user field id
// 102: ...

// TODO: fill it
// info for each blob
type BlobInfo struct {
	Length int
}

// example row_schema: {float_field, int_field, float_vector_field, string_field}
// Data {<0, row_id>, <1, timestamp>, <100, float_field>, <101, int_field>, <102, float_vector_field>, <103, string_field>}
type InsertData struct {
	Data  map[FieldID]FieldData // field id to field data
	Infos []BlobInfo
}
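
// A minimal InsertData for two rows might look like this (a sketch; field IDs
// follow the system-field convention above, and 100 is a hypothetical
// user-defined vector field):
//
//	data := &InsertData{Data: map[FieldID]FieldData{
//		0:   &Int64FieldData{Data: []int64{1, 2}},     // row IDs
//		1:   &Int64FieldData{Data: []int64{100, 101}}, // timestamps
//		100: &FloatVectorFieldData{Dim: 2, Data: []float32{0.1, 0.2, 0.3, 0.4}},
//	}}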

// InsertCodec serializes and deserializes the insert data
// Blob key example:
// ${tenant}/insert_log/${collection_id}/${partition_id}/${segment_id}/${field_id}/${log_idx}
type InsertCodec struct {
	Schema *etcdpb.CollectionMeta
}

// NewInsertCodec creates an InsertCodec with the given collection meta
func NewInsertCodec(schema *etcdpb.CollectionMeta) *InsertCodec {
	return &InsertCodec{Schema: schema}
}
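
// Typical round trip (a sketch, assuming a CollectionMeta that matches the data):
//
//	codec := NewInsertCodec(meta)
//	blobs, statsBlobs, err := codec.Serialize(partitionID, segmentID, data)
//	...
//	pID, sID, restored, err := codec.Deserialize(blobs)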

// Serialize transfers insert data to blobs; it sorts the insert data by timestamp first.
// From the schema it gets all fields.
// For each field it creates a binlog writer and writes an event to the binlog.
// It returns the binlog buffers in the end.
func (insertCodec *InsertCodec) Serialize(partitionID UniqueID, segmentID UniqueID, data *InsertData) ([]*Blob, []*Blob, error) {
	blobs := make([]*Blob, 0)
	statsBlobs := make([]*Blob, 0)
	var writer *InsertBinlogWriter
	timeFieldData, ok := data.Data[rootcoord.TimeStampField]
	if !ok {
		return nil, nil, fmt.Errorf("data doesn't contain timestamp field")
	}
	if timeFieldData.RowNum() <= 0 {
		return nil, nil, fmt.Errorf("there's no data in InsertData")
	}

	ts := timeFieldData.(*Int64FieldData).Data
	startTs := ts[0]
	endTs := ts[len(ts)-1]

	dataSorter := &DataSorter{
		InsertCodec: insertCodec,
		InsertData:  data,
	}
	sort.Sort(dataSorter)

	for _, field := range insertCodec.Schema.Schema.Fields {
		singleData := data.Data[field.FieldID]

		// encode fields
		writer = NewInsertBinlogWriter(field.DataType, insertCodec.Schema.ID, partitionID, segmentID, field.FieldID)
		eventWriter, err := writer.NextInsertEventWriter()
		if err != nil {
			return nil, nil, err
		}

		eventWriter.SetEventTimestamp(typeutil.Timestamp(startTs), typeutil.Timestamp(endTs))
		switch field.DataType {
		case schemapb.DataType_Bool:
			err = eventWriter.AddBoolToPayload(singleData.(*BoolFieldData).Data)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*BoolFieldData).GetMemorySize()))
		case schemapb.DataType_Int8:
			err = eventWriter.AddInt8ToPayload(singleData.(*Int8FieldData).Data)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*Int8FieldData).GetMemorySize()))
		case schemapb.DataType_Int16:
			err = eventWriter.AddInt16ToPayload(singleData.(*Int16FieldData).Data)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*Int16FieldData).GetMemorySize()))
		case schemapb.DataType_Int32:
			err = eventWriter.AddInt32ToPayload(singleData.(*Int32FieldData).Data)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*Int32FieldData).GetMemorySize()))
		case schemapb.DataType_Int64:
			err = eventWriter.AddInt64ToPayload(singleData.(*Int64FieldData).Data)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*Int64FieldData).GetMemorySize()))
		case schemapb.DataType_Float:
			err = eventWriter.AddFloatToPayload(singleData.(*FloatFieldData).Data)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*FloatFieldData).GetMemorySize()))
		case schemapb.DataType_Double:
			err = eventWriter.AddDoubleToPayload(singleData.(*DoubleFieldData).Data)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*DoubleFieldData).GetMemorySize()))
		case schemapb.DataType_String:
			for _, singleString := range singleData.(*StringFieldData).Data {
				err = eventWriter.AddOneStringToPayload(singleString)
				if err != nil {
					return nil, nil, err
				}
			}
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*StringFieldData).GetMemorySize()))
		case schemapb.DataType_BinaryVector:
			err = eventWriter.AddBinaryVectorToPayload(singleData.(*BinaryVectorFieldData).Data, singleData.(*BinaryVectorFieldData).Dim)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*BinaryVectorFieldData).GetMemorySize()))
		case schemapb.DataType_FloatVector:
			err = eventWriter.AddFloatVectorToPayload(singleData.(*FloatVectorFieldData).Data, singleData.(*FloatVectorFieldData).Dim)
			writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", singleData.(*FloatVectorFieldData).GetMemorySize()))
		default:
			return nil, nil, fmt.Errorf("undefined data type %d", field.DataType)
		}
		if err != nil {
			return nil, nil, err
		}
		writer.SetEventTimeStamp(typeutil.Timestamp(startTs), typeutil.Timestamp(endTs))

		err = writer.Finish()
		if err != nil {
			return nil, nil, err
		}

		buffer, err := writer.GetBuffer()
		if err != nil {
			return nil, nil, err
		}
		blobKey := fmt.Sprintf("%d", field.FieldID)
		blobs = append(blobs, &Blob{
			Key:   blobKey,
			Value: buffer,
		})
		eventWriter.Close()
		writer.Close()

		// stats fields
		switch field.DataType {
		case schemapb.DataType_Int64:
			statsWriter := &StatsWriter{}
			err = statsWriter.StatsInt64(field.FieldID, field.IsPrimaryKey, singleData.(*Int64FieldData).Data)
			if err != nil {
				return nil, nil, err
			}
			statsBuffer := statsWriter.GetBuffer()
			statsBlobs = append(statsBlobs, &Blob{
				Key:   blobKey,
				Value: statsBuffer,
			})
		}
	}

	return blobs, statsBlobs, nil
}

// DeserializeAll deserializes blobs into the collection/partition/segment IDs
// and the insert data they carry.
func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (
	collectionID UniqueID,
	partitionID UniqueID,
	segmentID UniqueID,
	data *InsertData,
	err error,
) {
	if len(blobs) == 0 {
		return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, fmt.Errorf("blobs is empty")
	}

	var blobList BlobList = blobs
	sort.Sort(blobList)

	var cID UniqueID
	var pID UniqueID
	var sID UniqueID
	resultData := &InsertData{}
	resultData.Data = make(map[FieldID]FieldData)
	for _, blob := range blobList {
		binlogReader, err := NewBinlogReader(blob.Value)
		if err != nil {
			return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
		}

		// read collectionID, partitionID and segmentID
		cID, pID, sID = binlogReader.CollectionID, binlogReader.PartitionID, binlogReader.SegmentID

		dataType := binlogReader.PayloadDataType
		fieldID := binlogReader.FieldID
		totalLength := 0
		for {
			eventReader, err := binlogReader.NextEventReader()
			if err != nil {
				// close the open reader on this error path as well
				binlogReader.Close()
				return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
			}
			if eventReader == nil {
				break
			}
			switch dataType {
			case schemapb.DataType_Bool:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &BoolFieldData{}
				}
				boolFieldData := resultData.Data[fieldID].(*BoolFieldData)
				singleData, err := eventReader.GetBoolFromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				boolFieldData.Data = append(boolFieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				boolFieldData.NumRows = append(boolFieldData.NumRows, int64(length))
				resultData.Data[fieldID] = boolFieldData
			case schemapb.DataType_Int8:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &Int8FieldData{}
				}
				int8FieldData := resultData.Data[fieldID].(*Int8FieldData)
				singleData, err := eventReader.GetInt8FromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				int8FieldData.Data = append(int8FieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				int8FieldData.NumRows = append(int8FieldData.NumRows, int64(length))
				resultData.Data[fieldID] = int8FieldData
			case schemapb.DataType_Int16:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &Int16FieldData{}
				}
				int16FieldData := resultData.Data[fieldID].(*Int16FieldData)
				singleData, err := eventReader.GetInt16FromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				int16FieldData.Data = append(int16FieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				int16FieldData.NumRows = append(int16FieldData.NumRows, int64(length))
				resultData.Data[fieldID] = int16FieldData
			case schemapb.DataType_Int32:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &Int32FieldData{}
				}
				int32FieldData := resultData.Data[fieldID].(*Int32FieldData)
				singleData, err := eventReader.GetInt32FromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				int32FieldData.Data = append(int32FieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				int32FieldData.NumRows = append(int32FieldData.NumRows, int64(length))
				resultData.Data[fieldID] = int32FieldData
			case schemapb.DataType_Int64:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &Int64FieldData{}
				}
				int64FieldData := resultData.Data[fieldID].(*Int64FieldData)
				singleData, err := eventReader.GetInt64FromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				int64FieldData.Data = append(int64FieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				int64FieldData.NumRows = append(int64FieldData.NumRows, int64(length))
				resultData.Data[fieldID] = int64FieldData
			case schemapb.DataType_Float:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &FloatFieldData{}
				}
				floatFieldData := resultData.Data[fieldID].(*FloatFieldData)
				singleData, err := eventReader.GetFloatFromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				floatFieldData.Data = append(floatFieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				floatFieldData.NumRows = append(floatFieldData.NumRows, int64(length))
				resultData.Data[fieldID] = floatFieldData
			case schemapb.DataType_Double:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &DoubleFieldData{}
				}
				doubleFieldData := resultData.Data[fieldID].(*DoubleFieldData)
				singleData, err := eventReader.GetDoubleFromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				doubleFieldData.Data = append(doubleFieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				doubleFieldData.NumRows = append(doubleFieldData.NumRows, int64(length))
				resultData.Data[fieldID] = doubleFieldData
			case schemapb.DataType_String:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &StringFieldData{}
				}
				stringFieldData := resultData.Data[fieldID].(*StringFieldData)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				stringFieldData.NumRows = append(stringFieldData.NumRows, int64(length))
				for i := 0; i < length; i++ {
					singleString, err := eventReader.GetOneStringFromPayload(i)
					if err != nil {
						eventReader.Close()
						binlogReader.Close()
						return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
					}
					stringFieldData.Data = append(stringFieldData.Data, singleString)
				}
				resultData.Data[fieldID] = stringFieldData
			case schemapb.DataType_BinaryVector:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &BinaryVectorFieldData{}
				}
				binaryVectorFieldData := resultData.Data[fieldID].(*BinaryVectorFieldData)
				var singleData []byte
				singleData, binaryVectorFieldData.Dim, err = eventReader.GetBinaryVectorFromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				binaryVectorFieldData.Data = append(binaryVectorFieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				binaryVectorFieldData.NumRows = append(binaryVectorFieldData.NumRows, int64(length))
				resultData.Data[fieldID] = binaryVectorFieldData
			case schemapb.DataType_FloatVector:
				if resultData.Data[fieldID] == nil {
					resultData.Data[fieldID] = &FloatVectorFieldData{}
				}
				floatVectorFieldData := resultData.Data[fieldID].(*FloatVectorFieldData)
				var singleData []float32
				singleData, floatVectorFieldData.Dim, err = eventReader.GetFloatVectorFromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				floatVectorFieldData.Data = append(floatVectorFieldData.Data, singleData...)
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
				}
				totalLength += length
				floatVectorFieldData.NumRows = append(floatVectorFieldData.NumRows, int64(length))
				resultData.Data[fieldID] = floatVectorFieldData
			default:
				eventReader.Close()
				binlogReader.Close()
				return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, fmt.Errorf("undefined data type %d", dataType)
			}
			eventReader.Close()
		}
		if fieldID == rootcoord.TimeStampField {
			blobInfo := BlobInfo{
				Length: totalLength,
			}
			resultData.Infos = append(resultData.Infos, blobInfo)
		}
		binlogReader.Close()
	}

	return cID, pID, sID, resultData, nil
}

// Deserialize transfers blobs back to insert data.
// From the schema it gets all fields.
// For each field it creates a binlog reader and reads all events into the buffer.
// It returns the original InsertData in the end.
func (insertCodec *InsertCodec) Deserialize(blobs []*Blob) (partitionID UniqueID, segmentID UniqueID, data *InsertData, err error) {
	_, partitionID, segmentID, data, err = insertCodec.DeserializeAll(blobs)
	return partitionID, segmentID, data, err
}

// DeleteData saves each entity's delete message as a <primary key, timestamp> pair,
// where the timestamp records when the entity was deleted.
type DeleteData struct {
	Pks      []int64     // primary keys
	Tss      []Timestamp // timestamps
	RowCount int64
}

// Append appends one pk&ts pair to DeleteData
func (data *DeleteData) Append(pk UniqueID, ts Timestamp) {
	data.Pks = append(data.Pks, pk)
	data.Tss = append(data.Tss, ts)
	data.RowCount++
}
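
// For example (a sketch):
//
//	dd := &DeleteData{}
//	dd.Append(42, 1634111812) // entity with pk 42 deleted at ts 1634111812
//	dd.Append(43, 1634111813)
//	// dd.RowCount is now 2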

// DeleteCodec serializes and deserializes the delete data
type DeleteCodec struct {
}

// NewDeleteCodec returns a DeleteCodec
func NewDeleteCodec() *DeleteCodec {
	return &DeleteCodec{}
}

// Serialize transfers delete data to a blob.
// For each delete message it saves a "pk,ts" string to the binlog.
func (deleteCodec *DeleteCodec) Serialize(collectionID UniqueID, partitionID UniqueID, segmentID UniqueID, data *DeleteData) (*Blob, error) {
	binlogWriter := NewDeleteBinlogWriter(schemapb.DataType_String, collectionID, partitionID, segmentID)
	eventWriter, err := binlogWriter.NextDeleteEventWriter()
	if err != nil {
		return nil, err
	}
	if len(data.Pks) != len(data.Tss) {
		return nil, fmt.Errorf("the lengths of pks and timestamps are not equal")
	}
	length := len(data.Pks)
	sizeTotal := 0
	var startTs, endTs Timestamp
	startTs, endTs = math.MaxUint64, 0
	for i := 0; i < length; i++ {
		pk := data.Pks[i]
		ts := data.Tss[i]
		if ts < startTs {
			startTs = ts
		}
		if ts > endTs {
			endTs = ts
		}
		err := eventWriter.AddOneStringToPayload(fmt.Sprintf("%d,%d", pk, ts))
		if err != nil {
			return nil, err
		}
		sizeTotal += binary.Size(pk)
		sizeTotal += binary.Size(ts)
	}
	eventWriter.SetEventTimestamp(startTs, endTs)
	binlogWriter.SetEventTimeStamp(startTs, endTs)

	// https://github.com/milvus-io/milvus/issues/9620
	// It's a little complicated to count the memory size of a map.
	// See: https://stackoverflow.com/questions/31847549/computing-the-memory-footprint-or-byte-length-of-a-map
	// Since the implementation of the golang map may differ between versions, we'd better not use this magic method.
	binlogWriter.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))

	err = binlogWriter.Finish()
	if err != nil {
		return nil, err
	}
	buffer, err := binlogWriter.GetBuffer()
	if err != nil {
		return nil, err
	}
	blob := &Blob{
		Value: buffer,
	}
	eventWriter.Close()
	binlogWriter.Close()
	return blob, nil
}
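
// Each delta-log row is the string "pk,ts": deleting pk 42 at ts 1634111812 is
// stored as the payload string "42,1634111812". Deserialize below splits each
// row on the comma and parses the two integers back out.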

// Deserialize deserializes the deltalog blobs into DeleteData
func (deleteCodec *DeleteCodec) Deserialize(blobs []*Blob) (partitionID UniqueID, segmentID UniqueID, data *DeleteData, err error) {
	if len(blobs) == 0 {
		return InvalidUniqueID, InvalidUniqueID, nil, fmt.Errorf("blobs is empty")
	}

	var pid, sid UniqueID
	result := &DeleteData{}
	for _, blob := range blobs {
		binlogReader, err := NewBinlogReader(blob.Value)
		if err != nil {
			return InvalidUniqueID, InvalidUniqueID, nil, err
		}

		pid, sid = binlogReader.PartitionID, binlogReader.SegmentID
		eventReader, err := binlogReader.NextEventReader()
		if err != nil {
			binlogReader.Close()
			return InvalidUniqueID, InvalidUniqueID, nil, err
		}

		length, err := eventReader.GetPayloadLengthFromReader()
		if err != nil {
			eventReader.Close()
			binlogReader.Close()
			return InvalidUniqueID, InvalidUniqueID, nil, err
		}

		for i := 0; i < length; i++ {
			singleString, err := eventReader.GetOneStringFromPayload(i)
			if err != nil {
				eventReader.Close()
				binlogReader.Close()
				return InvalidUniqueID, InvalidUniqueID, nil, err
			}

			splits := strings.Split(singleString, ",")
			if len(splits) != 2 {
				eventReader.Close()
				binlogReader.Close()
				return InvalidUniqueID, InvalidUniqueID, nil, fmt.Errorf("the format of delta log is incorrect")
			}

			pk, err := strconv.ParseInt(splits[0], 10, 64)
			if err != nil {
				eventReader.Close()
				binlogReader.Close()
				return InvalidUniqueID, InvalidUniqueID, nil, err
			}

			ts, err := strconv.ParseUint(splits[1], 10, 64)
			if err != nil {
				eventReader.Close()
				binlogReader.Close()
				return InvalidUniqueID, InvalidUniqueID, nil, err
			}

			result.Pks = append(result.Pks, pk)
			result.Tss = append(result.Tss, ts)
		}
		eventReader.Close()
		binlogReader.Close()
	}
	result.RowCount = int64(len(result.Pks))

	return pid, sid, result, nil
}

// DataDefinitionCodec serializes and deserializes the data definition
// Blob key example:
// ${tenant}/data_definition_log/${collection_id}/ts/${log_idx}
// ${tenant}/data_definition_log/${collection_id}/ddl/${log_idx}
type DataDefinitionCodec struct {
	collectionID int64
}

// NewDataDefinitionCodec is constructor for DataDefinitionCodec
func NewDataDefinitionCodec(collectionID int64) *DataDefinitionCodec {
	return &DataDefinitionCodec{collectionID: collectionID}
}
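
// Typical usage (a sketch): timestamps and requests are parallel slices, and
// eventTypes[i] tells Serialize which event writer to use for requests[i]:
//
//	codec := NewDataDefinitionCodec(collectionID)
//	blobs, err := codec.Serialize(ts, requests, eventTypes)
//	...
//	ts2, requests2, err := codec.Deserialize(blobs)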

// Serialize transfers @ts and @ddRequests to blobs.
// From the schema it gets all fields.
// For each field it creates a binlog writer and writes a specific event according
// to the dataDefinition type.
// It returns the blobs in the end.
func (dataDefinitionCodec *DataDefinitionCodec) Serialize(ts []Timestamp, ddRequests []string, eventTypes []EventTypeCode) ([]*Blob, error) {
	writer := NewDDLBinlogWriter(schemapb.DataType_Int64, dataDefinitionCodec.collectionID)

	var blobs []*Blob

	eventWriter, err := writer.NextCreateCollectionEventWriter()
	if err != nil {
		return nil, err
	}
	var int64Ts []int64
	for _, singleTs := range ts {
		int64Ts = append(int64Ts, int64(singleTs))
	}
	err = eventWriter.AddInt64ToPayload(int64Ts)
	if err != nil {
		return nil, err
	}
	eventWriter.SetEventTimestamp(ts[0], ts[len(ts)-1])
	writer.SetEventTimeStamp(ts[0], ts[len(ts)-1])

	// https://github.com/milvus-io/milvus/issues/9620
	writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", binary.Size(int64Ts)))

	err = writer.Finish()
	if err != nil {
		return nil, err
	}
	buffer, err := writer.GetBuffer()
	if err != nil {
		return nil, err
	}
	blobs = append(blobs, &Blob{
		Key:   Ts,
		Value: buffer,
	})
	eventWriter.Close()
	writer.Close()

	writer = NewDDLBinlogWriter(schemapb.DataType_String, dataDefinitionCodec.collectionID)

	sizeTotal := 0
	for pos, req := range ddRequests {
		sizeTotal += len(req)
		switch eventTypes[pos] {
		case CreateCollectionEventType:
			eventWriter, err := writer.NextCreateCollectionEventWriter()
			if err != nil {
				return nil, err
			}
			err = eventWriter.AddOneStringToPayload(req)
			if err != nil {
				return nil, err
			}
			eventWriter.SetEventTimestamp(ts[pos], ts[pos])
		case DropCollectionEventType:
			eventWriter, err := writer.NextDropCollectionEventWriter()
			if err != nil {
				return nil, err
			}
			err = eventWriter.AddOneStringToPayload(req)
			if err != nil {
				return nil, err
			}
			eventWriter.SetEventTimestamp(ts[pos], ts[pos])
		case CreatePartitionEventType:
			eventWriter, err := writer.NextCreatePartitionEventWriter()
			if err != nil {
				return nil, err
			}
			err = eventWriter.AddOneStringToPayload(req)
			if err != nil {
				return nil, err
			}
			eventWriter.SetEventTimestamp(ts[pos], ts[pos])
		case DropPartitionEventType:
			eventWriter, err := writer.NextDropPartitionEventWriter()
			if err != nil {
				return nil, err
			}
			err = eventWriter.AddOneStringToPayload(req)
			if err != nil {
				return nil, err
			}
			eventWriter.SetEventTimestamp(ts[pos], ts[pos])
		}
	}
	writer.SetEventTimeStamp(ts[0], ts[len(ts)-1])

	// https://github.com/milvus-io/milvus/issues/9620
	writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))

	err = writer.Finish()
	if err != nil {
		return nil, err
	}
	buffer, err = writer.GetBuffer()
	if err != nil {
		return nil, err
	}
	blobs = append(blobs, &Blob{
		Key:   DDL,
		Value: buffer,
	})
	eventWriter.Close()
	writer.Close()

	return blobs, nil
}

// Deserialize transfers blobs back to data definition data.
// From the schema it gets all fields.
// It sorts the blobs by blob key, since the blob log id increases over time.
// For each field it creates a binlog reader and reads all events into the buffer.
// It returns the original @ts and @ddRequests in the end.
func (dataDefinitionCodec *DataDefinitionCodec) Deserialize(blobs []*Blob) (ts []Timestamp, ddRequests []string, err error) {
	if len(blobs) == 0 {
		return nil, nil, fmt.Errorf("blobs is empty")
	}
	var requestsStrings []string
	var resultTs []Timestamp

	var blobList BlobList = blobs
	sort.Sort(blobList)

	for _, blob := range blobList {
		binlogReader, err := NewBinlogReader(blob.Value)
		if err != nil {
			return nil, nil, err
		}
		dataType := binlogReader.PayloadDataType

		for {
			eventReader, err := binlogReader.NextEventReader()
			if err != nil {
				binlogReader.Close()
				return nil, nil, err
			}
			if eventReader == nil {
				break
			}
			switch dataType {
			case schemapb.DataType_Int64:
				int64Ts, err := eventReader.GetInt64FromPayload()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return nil, nil, err
				}
				for _, singleTs := range int64Ts {
					resultTs = append(resultTs, Timestamp(singleTs))
				}
			case schemapb.DataType_String:
				length, err := eventReader.GetPayloadLengthFromReader()
				if err != nil {
					eventReader.Close()
					binlogReader.Close()
					return nil, nil, err
				}
				for i := 0; i < length; i++ {
					singleString, err := eventReader.GetOneStringFromPayload(i)
					if err != nil {
						eventReader.Close()
						binlogReader.Close()
						return nil, nil, err
					}
					requestsStrings = append(requestsStrings, singleString)
				}
			}
			eventReader.Close()
		}
		binlogReader.Close()
	}

	return resultTs, requestsStrings, nil
}

// IndexFileBinlogCodec serializes and deserializes index files
type IndexFileBinlogCodec struct {
}

// NewIndexFileBinlogCodec creates an IndexFileBinlogCodec
func NewIndexFileBinlogCodec() *IndexFileBinlogCodec {
	return &IndexFileBinlogCodec{}
}
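
// Round trip (a sketch): Serialize wraps each index-file blob in its own binlog
// and appends one extra blob, keyed IndexParamsKey, holding the JSON-encoded
// index params; Deserialize reassembles the files and params from those blobs:
//
//	codec := NewIndexFileBinlogCodec()
//	blobs, err := codec.Serialize(buildID, version, collID, partID, segID,
//		fieldID, params, indexName, indexID, files)
//	...
//	files2, params2, name, id, err := codec.Deserialize(blobs)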

func (codec *IndexFileBinlogCodec) Serialize(
	indexBuildID UniqueID,
	version int64,
	collectionID UniqueID,
	partitionID UniqueID,
	segmentID UniqueID,
	fieldID UniqueID,
	indexParams map[string]string,
	indexName string,
	indexID UniqueID,
	datas []*Blob,
) ([]*Blob, error) {

	var err error

	var blobs []*Blob

	ts := Timestamp(time.Now().UnixNano())

	for pos := range datas {
		writer := NewIndexFileBinlogWriter(indexBuildID, version, collectionID, partitionID, segmentID, fieldID, indexName, indexID, datas[pos].Key)

		// https://github.com/milvus-io/milvus/issues/9449
		// store index parameters to extra, in bytes format.
		params, _ := json.Marshal(indexParams)
		writer.descriptorEvent.AddExtra(IndexParamsKey, params)

		eventWriter, err := writer.NextIndexFileEventWriter()
		if err != nil {
			return nil, err
		}

		err = eventWriter.AddByteToPayload(datas[pos].Value)
		if err != nil {
			return nil, err
		}

		eventWriter.SetEventTimestamp(ts, ts)

		writer.SetEventTimeStamp(ts, ts)

		// https://github.com/milvus-io/milvus/issues/9620
		writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", len(datas[pos].Value)))

		err = writer.Finish()
		if err != nil {
			return nil, err
		}
		buffer, err := writer.GetBuffer()
		if err != nil {
			return nil, err
		}

		blobs = append(blobs, &Blob{
			Key: datas[pos].Key,
			//Key: strconv.Itoa(pos),
			Value: buffer,
		})
		eventWriter.Close()
		writer.Close()
	}

	// save index params
	writer := NewIndexFileBinlogWriter(indexBuildID, version, collectionID, partitionID, segmentID, fieldID, indexName, indexID, IndexParamsKey)

	eventWriter, err := writer.NextIndexFileEventWriter()
	if err != nil {
		return nil, err
	}

	params, _ := json.Marshal(indexParams)
	err = eventWriter.AddByteToPayload(params)
	if err != nil {
		return nil, err
	}

	eventWriter.SetEventTimestamp(ts, ts)

	writer.SetEventTimeStamp(ts, ts)

	// https://github.com/milvus-io/milvus/issues/9620
	// len(params) is also not accurate, since indexParams is a map
	writer.AddExtra(originalSizeKey, fmt.Sprintf("%v", len(params)))

	err = writer.Finish()
	if err != nil {
		return nil, err
	}
	buffer, err := writer.GetBuffer()
	if err != nil {
		return nil, err
	}

	blobs = append(blobs, &Blob{
		Key: IndexParamsKey,
		//Key: strconv.Itoa(len(datas)),
		Value: buffer,
	})
	eventWriter.Close()
	writer.Close()

	return blobs, nil
}

func (codec *IndexFileBinlogCodec) DeserializeImpl(blobs []*Blob) (
	indexBuildID UniqueID,
	version int64,
	collectionID UniqueID,
	partitionID UniqueID,
	segmentID UniqueID,
	fieldID UniqueID,
	indexParams map[string]string,
	indexName string,
	indexID UniqueID,
	datas []*Blob,
	err error,
) {
	if len(blobs) == 0 {
		return 0, 0, 0, 0, 0, 0, nil, "", 0, nil, errors.New("blobs is empty")
	}
	indexParams = make(map[string]string)
	datas = make([]*Blob, 0)

	for _, blob := range blobs {
		binlogReader, err := NewBinlogReader(blob.Value)
		if err != nil {
			log.Warn("failed to read binlog",
				zap.Error(err))
			return 0, 0, 0, 0, 0, 0, nil, "", 0, nil, err
		}
		dataType := binlogReader.PayloadDataType

		//desc, err := binlogReader.readDescriptorEvent()
		//if err != nil {
		//	log.Warn("failed to read descriptor event",
		//		zap.Error(err))
		//	return 0, 0, 0, 0, 0, 0, nil, "", 0, nil, err
		//}
		desc := binlogReader.descriptorEvent
		extraBytes := desc.ExtraBytes
		extra := make(map[string]interface{})
		_ = json.Unmarshal(extraBytes, &extra)

		value, _ := strconv.Atoi(extra["indexBuildID"].(string))
		indexBuildID = UniqueID(value)

		value, _ = strconv.Atoi(extra["version"].(string))
		version = int64(value)

		collectionID = desc.CollectionID
		partitionID = desc.PartitionID
		segmentID = desc.SegmentID
		fieldID = desc.FieldID

		indexName = extra["indexName"].(string)

		value, _ = strconv.Atoi(extra["indexID"].(string))
		indexID = UniqueID(value)

		key := extra["key"].(string)

		for {
			eventReader, err := binlogReader.NextEventReader()
			if err != nil {
				log.Warn("failed to get next event reader",
					zap.Error(err))
				binlogReader.Close()
				return 0, 0, 0, 0, 0, 0, nil, "", 0, nil, err
			}
			if eventReader == nil {
				break
			}
			switch dataType {
			case schemapb.DataType_Int8:
				content, err := eventReader.GetByteFromPayload()
				if err != nil {
					log.Warn("failed to get bytes from payload",
						zap.Error(err))
					eventReader.Close()
					binlogReader.Close()
					return 0, 0, 0, 0, 0, 0, nil, "", 0, nil, err
				}

				if key == IndexParamsKey {
					_ = json.Unmarshal(content, &indexParams)
				} else {
					blob := &Blob{Key: key}
					blob.Value = make([]byte, len(content))
					copy(blob.Value, content)
					datas = append(datas, blob)
				}
			}
			eventReader.Close()
		}
		binlogReader.Close()
	}

	return indexBuildID, version, collectionID, partitionID, segmentID, fieldID, indexParams, indexName, indexID, datas, nil
}

func (codec *IndexFileBinlogCodec) Deserialize(blobs []*Blob) (
	datas []*Blob,
	indexParams map[string]string,
	indexName string,
	indexID UniqueID,
	err error,
) {
	_, _, _, _, _, _, indexParams, indexName, indexID, datas, err = codec.DeserializeImpl(blobs)
	return datas, indexParams, indexName, indexID, err
}

// IndexCodec can serialize and deserialize index
type IndexCodec struct {
}

// NewIndexCodec creates IndexCodec
func NewIndexCodec() *IndexCodec {
	return &IndexCodec{}
}

func (indexCodec *IndexCodec) Serialize(blobs []*Blob, params map[string]string, indexName string, indexID UniqueID) ([]*Blob, error) {
	paramsBytes, err := json.Marshal(struct {
		Params    map[string]string
		IndexName string
		IndexID   UniqueID
	}{
		Params:    params,
		IndexName: indexName,
		IndexID:   indexID,
	})
	if err != nil {
		return nil, err
	}
	blobs = append(blobs, &Blob{Key: IndexParamsKey, Value: paramsBytes})
	return blobs, nil
}
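
// Serialize appends exactly one extra blob, keyed IndexParamsKey, whose value is
// the JSON of {Params, IndexName, IndexID}; Deserialize below pops that blob back
// off and returns the remaining index-file blobs untouched.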

func (indexCodec *IndexCodec) Deserialize(blobs []*Blob) ([]*Blob, map[string]string, string, UniqueID, error) {
	var file *Blob
	for i := 0; i < len(blobs); i++ {
		if blobs[i].Key != IndexParamsKey {
			continue
		}
		file = blobs[i]
		blobs = append(blobs[:i], blobs[i+1:]...)
		break
	}
	if file == nil {
		return nil, nil, "", InvalidUniqueID, fmt.Errorf("can not find params blob")
	}
	info := struct {
		Params    map[string]string
		IndexName string
		IndexID   UniqueID
	}{}
	if err := json.Unmarshal(file.Value, &info); err != nil {
		return nil, nil, "", InvalidUniqueID, fmt.Errorf("json unmarshal error: %s", err.Error())
	}

	return blobs, info.Params, info.IndexName, info.IndexID, nil
}