Mirror of https://gitee.com/milvus-io/milvus.git (synced 2024-11-29 18:38:44 +08:00)
enhance: Fix lint issues from recent PRs (#34482)
See also #34483. Some lint issues were introduced because no static check ran on recent PRs; this PR fixes them. --------- Signed-off-by: Congqi Xia <congqi.xia@zilliz.com>
This commit is contained in: parent df5ba3fae3, commit 3333160b8d
@@ -200,10 +200,8 @@ func AssembleImportRequest(task ImportTask, job ImportJob, meta *meta, alloc all
 		return stat.GetTotalRows()
 	})
 
-	var (
-		// Allocated IDs are used for rowID and the BEGINNING of the logID.
-		allocNum = totalRows + 1
-	)
+	// Allocated IDs are used for rowID and the BEGINNING of the logID.
+	allocNum := totalRows + 1
 
 	idBegin, idEnd, err := alloc.allocN(allocNum)
 	if err != nil {
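
Note: the hunk above is the usual gofmt/gofumpt simplification — a var block declaring a single variable with an initializer collapses into a short variable declaration with identical semantics. A minimal sketch with hypothetical names:

    func demo(count int64) int64 {
    	// before (flagged by the linter):
    	//   var (
    	//       total = count + 1
    	//   )
    	// after: same semantics, idiomatic short declaration
    	total := count + 1
    	return total
    }
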
@@ -957,7 +957,7 @@ func TestRetryInterval(t *testing.T) {
 	})
 	elapsed := time.Since(startTime)
 	// expected (defaultRetryCount - 1) intervals of defaultRetryInterval
-	expectedMin := defaultRetryInterval * time.Duration(defaultRetryCount-1)
+	expectedMin := defaultRetryInterval * (defaultRetryCount - 1)
 	expectedMax := expectedMin + (50 * time.Millisecond) // Allow 50ms margin for timing precision
 
 	if err == nil {
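
Note: dropping the conversion only works because defaultRetryCount is presumably an untyped constant — untyped constants mix freely in time.Duration arithmetic, so the unconvert linter flags the explicit time.Duration(...). If the count were a typed int variable, the conversion would still be required. A sketch under that assumption (assumes import "time"; names hypothetical):

    func demo() time.Duration {
    	const retryInterval = 100 * time.Millisecond // typed: time.Duration
    	const retryCount = 5                         // untyped constant
    	// no time.Duration(...) needed: the constant expression takes
    	// the Duration type from the other operand
    	return retryInterval * (retryCount - 1) // 400ms
    }
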
@@ -299,7 +299,6 @@ func (v *ParserVisitor) VisitMulDivMod(ctx *parser.MulDivModContext) interface{}
 			return fmt.Errorf("modulo can only apply on integer types")
 		}
 	default:
-		break
 	}
 	expr := &planpb.Expr{
 		Expr: &planpb.Expr_BinaryArithExpr{
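
Note: this is staticcheck S1023 — Go switch cases never fall through, so a trailing break (here in an otherwise-empty default) is dead code. Sketch:

    func kind(op string) string {
    	switch op {
    	case "%":
    		return "modulo"
    	default:
    		// a bare "break" here would be redundant: Go never falls through
    	}
    	return "other"
    }
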
@@ -39,7 +39,7 @@ const (
 const version = "version"
 
 // mark useMultiFieldFormat if there are multi fields in a log file
-const MULTI_FIELD = "MULTI_FIELD"
+const MultiField = "MULTI_FIELD"
 
 type descriptorEventData struct {
 	DescriptorEventDataFixPart
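
Note: stylecheck ST1003 — Go identifiers use MixedCaps, not SCREAMING_SNAKE_CASE. Only the Go name changes; the string value "MULTI_FIELD" written into descriptor events is untouched, so existing log files still match:

    // before: const MULTI_FIELD = "MULTI_FIELD" // ST1003 violation
    const MultiField = "MULTI_FIELD" // identifier renamed, value unchanged
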
@@ -28,6 +28,7 @@ import (
 	"github.com/apache/arrow/go/v12/arrow"
 	"github.com/apache/arrow/go/v12/arrow/array"
 	"github.com/apache/arrow/go/v12/arrow/memory"
+
 	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
 	"github.com/milvus-io/milvus/pkg/common"
 	"github.com/milvus-io/milvus/pkg/util/merr"
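
Note: the added blank line is presumably the gci/goimports group separator — Milvus-style import ordering keeps standard library, third-party, and milvus-io packages in separate blank-line-delimited groups. A hedged sketch of the convention:

    import (
    	"io" // standard library

    	"github.com/apache/arrow/go/v12/arrow" // other third-party

    	"github.com/milvus-io/milvus/pkg/common" // project packages
    )
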
@@ -712,7 +713,7 @@ func (dsw *MultiFieldDeltalogStreamWriter) writeDeltalogHeaders(w io.Writer) err
 	de := NewBaseDescriptorEvent(dsw.collectionID, dsw.partitionID, dsw.segmentID)
 	de.PayloadDataType = schemapb.DataType_Int64
 	de.descriptorEventData.AddExtra(originalSizeKey, strconv.Itoa(dsw.memorySize))
-	de.descriptorEventData.AddExtra(version, MULTI_FIELD)
+	de.descriptorEventData.AddExtra(version, MultiField)
 	if err := de.Write(w); err != nil {
 		return err
 	}
@@ -779,7 +780,7 @@ func NewDeltalogMultiFieldWriter(partitionID, segmentID UniqueID, eventWriter *M
 
 	for _, vv := range v {
 		builder.Field(1).(*array.Int64Builder).Append(int64(vv.Ts))
-		memorySize += uint64(vv.Ts)
+		memorySize += vv.Ts
 	}
 
 	arr := []arrow.Array{builder.Field(0).NewArray(), builder.Field(1).NewArray()}
@@ -849,14 +850,14 @@ func NewDeltalogDeserializeReader(blobs []*Blob) (*DeserializeReader[*DeleteLog]
 // check delta log description data to see if it is the format with
 // pk and ts column separately
 func supportMultiFieldFormat(blobs []*Blob) bool {
-	if blobs != nil && len(blobs) > 0 {
+	if len(blobs) > 0 {
 		reader, err := NewBinlogReader(blobs[0].Value)
 		defer reader.Close()
 		if err != nil {
 			return false
 		}
 		version := reader.descriptorEventData.Extras[version]
-		return version != nil && version.(string) == MULTI_FIELD
+		return version != nil && version.(string) == MultiField
 	}
 	return false
 }
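
Note: staticcheck S1009 — len of a nil slice is defined to be 0, so the nil guard adds nothing. Sketch:

    func hasBlobs(blobs [][]byte) bool {
    	// blobs != nil && len(blobs) > 0  is equivalent to  len(blobs) > 0
    	// because len of a nil slice is 0
    	return len(blobs) > 0
    }
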
@@ -147,7 +147,7 @@ func (suite *ReaderSuite) run(dt schemapb.DataType) {
 		chunked := lo.Chunk(rows, rowBytes)
 		chunkedRows := make([][rowBytes]byte, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_FloatVector:
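
Note: gocritic's unslice check — innerSlice is already a slice, so innerSlice[:] is a no-op re-slice. The destination keeps its [:] because chunkedRows[i] is an array and copy needs a slice. The same fix repeats in the hunks below. Sketch:

    func fill(chunk []byte) [4]byte {
    	var row [4]byte
    	copy(row[:], chunk) // [:] required here: row is an array
    	// copy(row[:], chunk[:]) would be flagged: chunk is already a slice
    	return row
    }
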
@@ -155,7 +155,7 @@ func (suite *ReaderSuite) run(dt schemapb.DataType) {
 		chunked := lo.Chunk(rows, dim)
 		chunkedRows := make([][dim]float32, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_Float16Vector, schemapb.DataType_BFloat16Vector:
@@ -164,7 +164,7 @@ func (suite *ReaderSuite) run(dt schemapb.DataType) {
 		chunked := lo.Chunk(rows, rowBytes)
 		chunkedRows := make([][rowBytes]byte, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	default:
@@ -281,7 +281,7 @@ func (suite *ReaderSuite) failRun(dt schemapb.DataType, isDynamic bool) {
 		chunked := lo.Chunk(rows, rowBytes)
 		chunkedRows := make([][rowBytes]byte, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_FloatVector:
@@ -289,7 +289,7 @@ func (suite *ReaderSuite) failRun(dt schemapb.DataType, isDynamic bool) {
 		chunked := lo.Chunk(rows, dim)
 		chunkedRows := make([][dim]float32, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_Float16Vector, schemapb.DataType_BFloat16Vector:
@@ -298,7 +298,7 @@ func (suite *ReaderSuite) failRun(dt schemapb.DataType, isDynamic bool) {
 		chunked := lo.Chunk(rows, rowBytes)
 		chunkedRows := make([][rowBytes]byte, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	default:
@@ -114,8 +114,8 @@ func Test_baseChecker_CheckValidDataType(t *testing.T) {
 
 	c := newBaseChecker()
 	for _, test := range cases {
-		field_schema := &schemapb.FieldSchema{DataType: test.dType}
-		err := c.CheckValidDataType(field_schema)
+		fieldSchema := &schemapb.FieldSchema{DataType: test.dType}
+		err := c.CheckValidDataType(fieldSchema)
 		if test.errIsNil {
 			assert.NoError(t, err)
 		} else {
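
Note: the same ST1003 naming rule as above, applied to locals — underscored names become camelCase. The identical rename repeats in the three checker tests below:

    fieldSchema := &schemapb.FieldSchema{DataType: test.dType} // was field_schema
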
@@ -136,8 +136,8 @@ func Test_binFlatChecker_CheckValidDataType(t *testing.T) {
 
 	c := newBinFlatChecker()
 	for _, test := range cases {
-		field_schema := &schemapb.FieldSchema{DataType: test.dType}
-		err := c.CheckValidDataType(field_schema)
+		fieldSchema := &schemapb.FieldSchema{DataType: test.dType}
+		err := c.CheckValidDataType(fieldSchema)
 		if test.errIsNil {
 			assert.NoError(t, err)
 		} else {
@@ -187,8 +187,8 @@ func Test_binIVFFlatChecker_CheckValidDataType(t *testing.T) {
 
 	c := newBinIVFFlatChecker()
 	for _, test := range cases {
-		field_schema := &schemapb.FieldSchema{DataType: test.dType}
-		err := c.CheckValidDataType(field_schema)
+		fieldSchema := &schemapb.FieldSchema{DataType: test.dType}
+		err := c.CheckValidDataType(fieldSchema)
 		if test.errIsNil {
 			assert.NoError(t, err)
 		} else {
@@ -69,8 +69,8 @@ func Test_binaryVectorBaseChecker_CheckValidDataType(t *testing.T) {
 
 	c := newBinaryVectorBaseChecker()
 	for _, test := range cases {
-		field_schema := &schemapb.FieldSchema{DataType: test.dType}
-		err := c.CheckValidDataType(field_schema)
+		fieldSchema := &schemapb.FieldSchema{DataType: test.dType}
+		err := c.CheckValidDataType(fieldSchema)
 		if test.errIsNil {
 			assert.NoError(t, err)
 		} else {
@@ -21,15 +21,15 @@ func (c *BITMAPChecker) CheckTrain(params map[string]string) error {
 }
 
 func (c *BITMAPChecker) CheckValidDataType(field *schemapb.FieldSchema) error {
-	main_type := field.GetDataType()
-	elem_type := field.GetElementType()
-	if !typeutil.IsBoolType(main_type) && !typeutil.IsIntegerType(main_type) &&
-		!typeutil.IsStringType(main_type) && !typeutil.IsArrayType(main_type) {
+	mainType := field.GetDataType()
+	elemType := field.GetElementType()
+	if !typeutil.IsBoolType(mainType) && !typeutil.IsIntegerType(mainType) &&
+		!typeutil.IsStringType(mainType) && !typeutil.IsArrayType(mainType) {
 		return fmt.Errorf("bitmap index are only supported on bool, int, string and array field")
 	}
-	if typeutil.IsArrayType(main_type) {
-		if !typeutil.IsBoolType(elem_type) && !typeutil.IsIntegerType(elem_type) &&
-			!typeutil.IsStringType(elem_type) {
+	if typeutil.IsArrayType(mainType) {
+		if !typeutil.IsBoolType(elemType) && !typeutil.IsIntegerType(elemType) &&
+			!typeutil.IsStringType(elemType) {
 			return fmt.Errorf("bitmap index are only supported on bool, int, string for array field")
 		}
 	}
@@ -175,4 +175,4 @@ func CheckQueryResult(t *testing.T, expColumns []column.Column, actualColumns []
 			log.Error("CheckQueryResult actualColumns no column", zap.String("name", expColumn.Name()))
 		}
 	}
-}
\ No newline at end of file
+}
@@ -2,10 +2,10 @@ package helper
 
 import (
 	"context"
-	"github.com/stretchr/testify/require"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
 
 	clientv2 "github.com/milvus-io/milvus/client/v2"
@@ -64,7 +64,6 @@ var UnsupportedSparseVecMetricsType = []entity.MetricType{
 	entity.SUPERSTRUCTURE,
 }
 
-
 // GenAllFloatIndex gen all float vector index
 func GenAllFloatIndex(metricType entity.MetricType) []index.Index {
 	nlist := 128
@@ -94,4 +93,4 @@ func SupportScalarIndexFieldType(field entity.FieldType) bool {
 		}
 	}
 	return true
-}
\ No newline at end of file
+}
@@ -5,14 +5,15 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+
 	"github.com/milvus-io/milvus/client/v2"
 	"github.com/milvus-io/milvus/client/v2/entity"
 	"github.com/milvus-io/milvus/client/v2/index"
 	"github.com/milvus-io/milvus/pkg/log"
 	"github.com/milvus-io/milvus/tests/go_client/common"
 	hp "github.com/milvus-io/milvus/tests/go_client/testcases/helper"
-	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
 )
 
 func TestIndexVectorDefault(t *testing.T) {
@@ -129,13 +130,12 @@ func TestIndexAutoFloatVector(t *testing.T) {
 	prepare.InsertData(ctx, t, mc, ip, hp.TNewDataOption())
 	prepare.FlushData(ctx, t, mc, schema.CollectionName)
 
-
 	for _, invalidMt := range hp.SupportBinFlatMetricType {
 		idx := index.NewAutoIndex(invalidMt)
 		_, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, idx))
 		common.CheckErr(t, err, false, fmt.Sprintf("float vector index does not support metric type: %s", invalidMt))
 	}
-		// auto index with different metric type on float vec
+	// auto index with different metric type on float vec
 	for _, mt := range hp.SupportFloatMetricType {
 		idx := index.NewAutoIndex(mt)
 		indexTask, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, idx))
@@ -248,7 +248,7 @@ func TestCreateAutoIndexAllFields(t *testing.T) {
 	var expFields []string
 	var idx index.Index
 	for _, field := range schema.Fields {
-		if field.DataType == entity.FieldTypeArray || field.DataType == entity.FieldTypeJSON{
+		if field.DataType == entity.FieldTypeArray || field.DataType == entity.FieldTypeJSON {
 			idx = index.NewAutoIndex(entity.IP)
 			_, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, field.Name, idx))
 			common.CheckErr(t, err, false, fmt.Sprintf("create auto index on %s field is not supported", field.DataType))
@@ -549,8 +549,10 @@ func TestCreateScalarIndexVectorField(t *testing.T) {
 	prepare.FlushData(ctx, t, mc, schema.CollectionName)
 
 	for _, idx := range []index.Index{index.NewInvertedIndex(), index.NewSortedIndex(), index.NewTrieIndex()} {
-		for _, fieldName := range []string{common.DefaultFloatVecFieldName, common.DefaultBinaryVecFieldName,
-			common.DefaultBFloat16VecFieldName, common.DefaultFloat16VecFieldName} {
+		for _, fieldName := range []string{
+			common.DefaultFloatVecFieldName, common.DefaultBinaryVecFieldName,
+			common.DefaultBFloat16VecFieldName, common.DefaultFloat16VecFieldName,
+		} {
 			_, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, fieldName, idx))
 			common.CheckErr(t, err, false, "metric type not set for vector index")
 		}
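
Note: gofmt requires a trailing comma when a composite literal's closing brace sits on its own line; splitting the list this way keeps every element line uniform. The SCANN hunk further down applies the same rule. Sketch:

    fieldNames := []string{
    	"floatVec", "binaryVec",
    	"bf16Vec", "fp16Vec", // trailing comma mandatory before a lone "}"
    }
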
@@ -607,7 +609,7 @@ func TestCreateIndexJsonField(t *testing.T) {
 	_, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultJSONFieldName, idx).WithIndexName("json_index"))
 	common.CheckErr(t, err, false, "data type should be FloatVector, Float16Vector or BFloat16Vector")
 
-	//create scalar index on json field
+	// create scalar index on json field
 	type scalarIndexError struct {
 		idx    index.Index
 		errMsg string
@@ -1050,6 +1052,7 @@ func TestCreateIndexAsync(t *testing.T) {
 	common.CheckErr(t, err, true)
 
 	idx, err := mc.DescribeIndex(ctx, client.NewDescribeIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName))
+	common.CheckErr(t, err, true)
 	log.Debug("describe index", zap.Any("descIdx", idx))
 }
 
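
Note: this and the following hunks look like fixes for assigned-but-unchecked errors — ineffassign/staticcheck report err as a dead store when it is reassigned or falls out of scope unread, and the added common.CheckErr asserts on it. A minimal sketch of the pattern, not the project's helpers:

    func demo(t *testing.T, await func() error) {
    	err := await() // hypothetical callable returning an error
    	require.NoError(t, err) // without this, err is assigned and never read
    }
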
@@ -1071,6 +1074,7 @@ func TestIndexMultiVectorDupName(t *testing.T) {
 	idxTask, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, idx).WithIndexName("index_1"))
 	common.CheckErr(t, err, true)
 	err = idxTask.Await(ctx)
+	common.CheckErr(t, err, true)
 
 	_, err = mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloat16VecFieldName, idx).WithIndexName("index_1"))
 	common.CheckErr(t, err, false, "CreateIndex failed: at most one distinct index is allowed per field")
@@ -1099,6 +1103,7 @@ func TestDropIndex(t *testing.T) {
 	idxTask, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, idx).WithIndexName(idxName))
 	common.CheckErr(t, err, true)
 	err = idxTask.Await(ctx)
+	common.CheckErr(t, err, true)
 
 	// describe index with fieldName -> not found
 	_, errNotFound := mc.DescribeIndex(ctx, client.NewDescribeIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName))
@@ -1106,6 +1111,7 @@ func TestDropIndex(t *testing.T) {
 
 	// describe index with index name -> ok
 	descIdx, err := mc.DescribeIndex(ctx, client.NewDescribeIndexOption(schema.CollectionName, idxName))
+	common.CheckErr(t, err, true)
 	require.EqualValues(t, index.NewGenericIndex(idxName, idx.Params()), descIdx)
 
 	// drop index with field name
@@ -1142,6 +1148,7 @@ func TestDropIndexCreateIndexWithIndexName(t *testing.T) {
 	idxTask, err := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, idx).WithIndexName(idxName))
 	common.CheckErr(t, err, true)
 	err = idxTask.Await(ctx)
+	common.CheckErr(t, err, true)
 	descIdx, err := mc.DescribeIndex(ctx, client.NewDescribeIndexOption(schema.CollectionName, idxName))
 	common.CheckErr(t, err, true)
 	require.EqualValues(t, index.NewGenericIndex(idxName, idx.Params()), descIdx)
@@ -1158,6 +1165,7 @@ func TestDropIndexCreateIndexWithIndexName(t *testing.T) {
 	idxTask, err2 := mc.CreateIndex(ctx, client.NewCreateIndexOption(schema.CollectionName, common.DefaultFloatVecFieldName, ipIdx).WithIndexName(idxName))
 	common.CheckErr(t, err2, true)
 	err = idxTask.Await(ctx)
+	common.CheckErr(t, err, true)
 	descIdx2, err2 := mc.DescribeIndex(ctx, client.NewDescribeIndexOption(schema.CollectionName, idxName))
 	common.CheckErr(t, err2, true)
 	require.EqualValues(t, index.NewGenericIndex(idxName, ipIdx.Params()), descIdx2)
@@ -545,7 +545,8 @@ func TestSearchInvalidScannReorderK(t *testing.T) {
 	prepare.InsertData(ctx, t, mc, hp.NewInsertParams(schema, 500), hp.TNewDataOption())
 	prepare.FlushData(ctx, t, mc, schema.CollectionName)
 	prepare.CreateIndex(ctx, t, mc, hp.TNewIndexParams(schema).TWithFieldIndex(map[string]index.Index{
-		common.DefaultFloatVecFieldName: index.NewSCANNIndex(entity.COSINE, 16, true)}))
+		common.DefaultFloatVecFieldName: index.NewSCANNIndex(entity.COSINE, 16, true),
+	}))
 	prepare.Load(ctx, t, mc, hp.NewLoadParams(schema.CollectionName))
 
 	// search with invalid reorder_k < topK
@@ -133,7 +133,7 @@ func GenerateNumpyFiles(cm storage.ChunkManager, schema *schemapb.CollectionSche
 		chunked := lo.Chunk(rows, rowBytes)
 		chunkedRows := make([][rowBytes]byte, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_FloatVector:
@@ -144,7 +144,7 @@ func GenerateNumpyFiles(cm storage.ChunkManager, schema *schemapb.CollectionSche
 		chunked := lo.Chunk(rows, dim)
 		chunkedRows := make([][dim]float32, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_Float16Vector:
@@ -156,7 +156,7 @@ func GenerateNumpyFiles(cm storage.ChunkManager, schema *schemapb.CollectionSche
 		chunked := lo.Chunk(rows, rowBytes)
 		chunkedRows := make([][rowBytes]byte, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_BFloat16Vector:
@@ -168,7 +168,7 @@ func GenerateNumpyFiles(cm storage.ChunkManager, schema *schemapb.CollectionSche
 		chunked := lo.Chunk(rows, rowBytes)
 		chunkedRows := make([][rowBytes]byte, len(chunked))
 		for i, innerSlice := range chunked {
-			copy(chunkedRows[i][:], innerSlice[:])
+			copy(chunkedRows[i][:], innerSlice)
 		}
 		data = chunkedRows
 	case schemapb.DataType_SparseFloatVector: