Refine errors to remove changes breaking design (#26521)

Signed-off-by: yah01 <yah2er0ne@outlook.com>
yah01 2023-09-04 09:57:09 +08:00, committed by GitHub
parent c6024a32f5
commit 3349db4aa7
69 changed files with 2368 additions and 2113 deletions

View File

@ -58,8 +58,8 @@ func (rc *RootCoord) Run() error {
// Stop terminates service
func (rc *RootCoord) Stop() error {
- if err := rc.svr.Stop(); err != nil {
- return err
+ if rc.svr != nil {
+ return rc.svr.Stop()
}
return nil
}

View File

@ -19,12 +19,13 @@ import (
"context"
"time"
"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"go.uber.org/zap"
)
@ -145,12 +146,9 @@ func (b *CoordinatorBroker) HasCollection(ctx context.Context, collectionID int6
if resp == nil {
return false, errNilResponse
}
- if resp.Status.ErrorCode == commonpb.ErrorCode_Success {
- return true, nil
- }
- statusErr := common.NewStatusError(resp.Status.ErrorCode, resp.Status.Reason)
- if common.IsCollectionNotExistError(statusErr) {
+ err = merr.Error(resp.GetStatus())
+ if errors.Is(err, merr.ErrCollectionNotFound) {
return false, nil
}
- return false, statusErr
+ return err == nil, err
}
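
This hunk is the template the whole commit follows on the consumer side: decode the wire status once with merr.Error, then branch with errors.Is against a sentinel. A minimal sketch of the pattern, assuming merr.Error returns nil for a Success status:

    import (
        "github.com/cockroachdb/errors"
        "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
        "github.com/milvus-io/milvus/pkg/util/merr"
    )

    // hasCollection reports existence from a DescribeCollection status.
    func hasCollection(status *commonpb.Status) (bool, error) {
        err := merr.Error(status) // nil when the status is Success
        if errors.Is(err, merr.ErrCollectionNotFound) {
            return false, nil // "not found" is an answer, not a failure
        }
        return err == nil, err
    }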

View File

@ -258,10 +258,8 @@ func (s *Server) GetIndexState(ctx context.Context, req *indexpb.GetIndexStateRe
}, nil
}
ret := &indexpb.GetIndexStateResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
State: commonpb.IndexState_Finished,
Status: merr.Status(nil),
State: commonpb.IndexState_Finished,
}
indexInfo := &indexpb.IndexInfo{
@ -301,9 +299,7 @@ func (s *Server) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegme
}
ret := &indexpb.GetSegmentIndexStateResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
States: make([]*indexpb.SegmentIndexState, 0),
}
indexID2CreateTs := s.meta.GetIndexIDByName(req.GetCollectionID(), req.GetIndexName())
@ -510,9 +506,7 @@ func (s *Server) GetIndexBuildProgress(ctx context.Context, req *indexpb.GetInde
log.Info("GetIndexBuildProgress success", zap.Int64("collectionID", req.GetCollectionID()),
zap.String("indexName", req.GetIndexName()))
return &indexpb.GetIndexBuildProgressResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
IndexedRows: indexInfo.IndexedRows,
TotalRows: indexInfo.TotalRows,
PendingIndexRows: indexInfo.PendingIndexRows,
@ -580,9 +574,7 @@ func (s *Server) DescribeIndex(ctx context.Context, req *indexpb.DescribeIndexRe
}
log.Info("DescribeIndex success", zap.String("indexName", req.GetIndexName()))
return &indexpb.DescribeIndexResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
IndexInfos: indexInfos,
}, nil
}
@ -640,9 +632,7 @@ func (s *Server) GetIndexStatistics(ctx context.Context, req *indexpb.GetIndexSt
log.Debug("GetIndexStatisticsResponse success",
zap.String("indexName", req.GetIndexName()))
return &indexpb.GetIndexStatisticsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
IndexInfos: indexInfos,
}, nil
}
@ -668,9 +658,7 @@ func (s *Server) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (
return errResp, nil
}
ret := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
ret := merr.Status(nil)
indexes := s.meta.GetIndexesForCollection(req.GetCollectionID(), req.GetIndexName())
if len(indexes) == 0 {
@ -722,9 +710,7 @@ func (s *Server) GetIndexInfos(ctx context.Context, req *indexpb.GetIndexInfoReq
}, nil
}
ret := &indexpb.GetIndexInfoResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
SegmentInfo: map[int64]*indexpb.SegmentInfo{},
}
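
All of the hunks above collapse the hand-written success Status into merr.Status(nil). The helper behaves roughly as sketched below (an illustration only, not the library code): nil maps to ErrorCode_Success, any other error is encoded with a code and reason.

    import (
        "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
    )

    // statusOf is illustrative; the real helper is merr.Status in pkg/util/merr.
    func statusOf(err error) *commonpb.Status {
        if err == nil {
            return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
        }
        return &commonpb.Status{
            // merr derives the code from the wrapped error; UnexpectedError
            // here is a placeholder for that mapping
            ErrorCode: commonpb.ErrorCode_UnexpectedError,
            Reason:    err.Error(),
        }
    }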

View File

@ -18,11 +18,11 @@ package datacoord
import (
"context"
"fmt"
"sync/atomic"
"time"
"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
clientv3 "go.etcd.io/etcd/client/v3"
@ -395,10 +395,9 @@ func (m *mockRootCoordService) HasCollection(ctx context.Context, req *milvuspb.
func (m *mockRootCoordService) DescribeCollection(ctx context.Context, req *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
// return not exist
if req.CollectionID == -1 {
- err := common.NewCollectionNotExistError(fmt.Sprintf("can't find collection: %d", req.CollectionID))
+ err := merr.WrapErrCollectionNotFound(req.GetCollectionID())
return &milvuspb.DescribeCollectionResponse{
- // TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
- Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError, Reason: err.Error()},
+ Status: merr.Status(err),
}, nil
}
return &milvuspb.DescribeCollectionResponse{

View File

@ -2455,7 +2455,7 @@ func TestShouldDropChannel(t *testing.T) {
//myRoot.code = commonpb.ErrorCode_CollectionNotExists
myRoot.EXPECT().DescribeCollection(mock.Anything, mock.Anything).
Return(&milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_CollectionNotExists},
Status: merr.Status(merr.WrapErrCollectionNotFound(-1)),
CollectionID: -1,
}, nil).Once()
assert.True(t, svr.handler.CheckShouldDropChannel("ch99", -1))
@ -2482,7 +2482,7 @@ func TestShouldDropChannel(t *testing.T) {
t.Run("collection name in kv, collection not exist", func(t *testing.T) {
myRoot.EXPECT().DescribeCollection(mock.Anything, mock.Anything).
Return(&milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_CollectionNotExists},
Status: merr.Status(merr.WrapErrCollectionNotFound(-1)),
CollectionID: -1,
}, nil).Once()
assert.True(t, svr.handler.CheckShouldDropChannel("ch1", -1))

View File

@ -54,10 +54,8 @@ func (s *Server) isClosed() bool {
// GetTimeTickChannel legacy API, returns time tick channel name
func (s *Server) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Value: Params.CommonCfg.DataCoordTimeTick.GetValue(),
Status: merr.Status(nil),
Value: Params.CommonCfg.DataCoordTimeTick.GetValue(),
}, nil
}
@ -203,18 +201,13 @@ func (s *Server) AssignSegmentID(ctx context.Context, req *datapb.AssignSegmentI
CollectionID: r.CollectionID,
PartitionID: r.PartitionID,
ExpireTime: allocation.ExpireTime,
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
}
assigns = append(assigns, result)
}
}
return &datapb.AssignSegmentIDResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
SegIDAssignments: assigns,
}, nil
}
@ -340,10 +333,8 @@ func (s *Server) GetPartitionStatistics(ctx context.Context, req *datapb.GetPart
// GetSegmentInfoChannel legacy API, returns segment info statistics channel
func (s *Server) GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Value: Params.CommonCfg.DataCoordSegmentInfo.GetValue(),
Status: merr.Status(nil),
Value: Params.CommonCfg.DataCoordSegmentInfo.GetValue(),
}, nil
}
@ -591,9 +582,7 @@ func (s *Server) SetSegmentState(ctx context.Context, req *datapb.SetSegmentStat
}, nil
}
return &datapb.SetSegmentStateResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}, nil
}
@ -619,10 +608,7 @@ func (s *Server) GetComponentStates(ctx context.Context) (*milvuspb.ComponentSta
Role: "datacoord",
StateCode: code,
},
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
}
return resp, nil
}
@ -974,10 +960,7 @@ func (s *Server) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
}
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Configuations: configList,
}, nil
}
@ -1400,9 +1383,7 @@ func (s *Server) UpdateSegmentStatistics(ctx context.Context, req *datapb.Update
return resp, nil
}
s.updateSegmentStatistics(req.GetStats())
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
// UpdateChannelCheckpoint updates channel checkpoint in dataCoord.
@ -1424,9 +1405,7 @@ func (s *Server) UpdateChannelCheckpoint(ctx context.Context, req *datapb.Update
return resp, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
// ReportDataNodeTtMsgs sends datanode timetick messages to dataCoord.
@ -1590,9 +1569,7 @@ func (s *Server) SaveImportSegment(ctx context.Context, req *datapb.SaveImportSe
Reason: err.Error(),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
// UnsetIsImportingState unsets the isImporting states of the given segments.
@ -1615,9 +1592,7 @@ func (s *Server) UnsetIsImportingState(ctx context.Context, req *datapb.UnsetIsI
Reason: reportErr.Error(),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
// MarkSegmentsDropped marks the given segments as `Dropped`.
@ -1638,9 +1613,7 @@ func (s *Server) MarkSegmentsDropped(ctx context.Context, req *datapb.MarkSegmen
ErrorCode: commonpb.ErrorCode_UnexpectedError,
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
func (s *Server) BroadcastAlteredCollection(ctx context.Context, req *datapb.AlterCollectionRequest) (*commonpb.Status, error) {
@ -1674,16 +1647,12 @@ func (s *Server) BroadcastAlteredCollection(ctx context.Context, req *datapb.Alt
Properties: properties,
}
s.meta.AddCollection(collInfo)
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
clonedColl.Properties = properties
s.meta.AddCollection(clonedColl)
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
func (s *Server) CheckHealth(ctx context.Context, req *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) {

View File

@ -22,6 +22,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/pkg/util/hardware"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/ratelimitutil"
@ -131,10 +132,7 @@ func (node *DataNode) getSystemInfoMetrics(ctx context.Context, req *milvuspb.Ge
}
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.DataNodeRole, paramtable.GetNodeID()),
}, nil

View File

@ -417,9 +417,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
}()
importResult := &rootcoordpb.ImportResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
TaskId: req.GetImportTask().TaskId,
DatanodeId: paramtable.GetNodeID(),
State: commonpb.ImportState_ImportStarted,
@ -519,9 +517,7 @@ func (node *DataNode) Import(ctx context.Context, req *datapb.ImportTaskRequest)
return returnFailFunc("failed to import files", err)
}
resp := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
resp := merr.Status(nil)
return resp, nil
}
@ -654,9 +650,7 @@ func (node *DataNode) AddImportSegment(ctx context.Context, req *datapb.AddImpor
}
ds.flushingSegCache.Remove(req.GetSegmentId())
return &datapb.AddImportSegmentResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
ChannelPos: posID,
}, nil
}
@ -705,9 +699,7 @@ func assignSegmentFunc(node *DataNode, req *datapb.ImportTaskRequest) importutil
// ignore the returned error, since even if the report fails, the segments can still be cleaned up
// retry 10 times; if the rootcoord is down, the report call will take 20+ seconds
importResult := &rootcoordpb.ImportResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
TaskId: req.GetImportTask().TaskId,
DatanodeId: paramtable.GetNodeID(),
State: commonpb.ImportState_ImportStarted,

View File

@ -1058,10 +1058,7 @@ func (s *Server) GetProxyMetrics(ctx context.Context, request *milvuspb.GetMetri
func (s *Server) GetVersion(ctx context.Context, request *milvuspb.GetVersionRequest) (*milvuspb.GetVersionResponse, error) {
buildTags := os.Getenv(metricsinfo.GitBuildTagsEnvKey)
return &milvuspb.GetVersionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Version: buildTags,
}, nil
}

View File

@ -56,6 +56,7 @@ import (
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/hardware"
"github.com/milvus-io/milvus/pkg/util/lifetime"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -288,9 +289,7 @@ func (i *IndexNode) GetComponentStates(ctx context.Context) (*milvuspb.Component
ret := &milvuspb.ComponentStates{
State: stateInfo,
SubcomponentStates: nil, // todo add subcomponents states
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}
log.RatedInfo(10, "IndexNode Component states",
@ -305,9 +304,7 @@ func (i *IndexNode) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringRes
log.RatedInfo(10, "get IndexNode time tick channel ...")
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}, nil
}
@ -315,9 +312,7 @@ func (i *IndexNode) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringRes
func (i *IndexNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
log.RatedInfo(10, "get IndexNode statistics channel ...")
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}, nil
}
@ -352,10 +347,7 @@ func (i *IndexNode) ShowConfigurations(ctx context.Context, req *internalpb.Show
}
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Configuations: configList,
}, nil
}

View File

@ -27,6 +27,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/util/hardware"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
@ -84,22 +85,16 @@ func NewIndexNodeMock() *Mock {
StateCode: commonpb.StateCode_Healthy,
},
SubcomponentStates: nil,
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}, nil
},
CallGetStatisticsChannel: func(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}, nil
},
CallCreateJob: func(ctx context.Context, req *indexpb.CreateJobRequest) (*commonpb.Status, error) {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
},
CallQueryJobs: func(ctx context.Context, in *indexpb.QueryJobsRequest) (*indexpb.QueryJobsResponse, error) {
indexInfos := make([]*indexpb.IndexTaskInfo, 0)
@ -111,23 +106,17 @@ func NewIndexNodeMock() *Mock {
})
}
return &indexpb.QueryJobsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
ClusterID: in.ClusterID,
IndexInfos: indexInfos,
}, nil
},
CallDropJobs: func(ctx context.Context, in *indexpb.DropJobsRequest) (*commonpb.Status, error) {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
},
CallGetJobStats: func(ctx context.Context, in *indexpb.GetJobStatsRequest) (*indexpb.GetJobStatsResponse, error) {
return &indexpb.GetJobStatsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
TotalJobNum: 1,
EnqueueJobNum: 0,
InProgressJobNum: 1,
@ -148,9 +137,7 @@ func NewIndexNodeMock() *Mock {
},
CallShowConfigurations: func(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) {
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}, nil
},
}
@ -252,10 +239,7 @@ func getMockSystemInfoMetrics(
resp, _ := metricsinfo.MarshalComponentInfos(nodeInfos)
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
}, nil

View File

@ -34,6 +34,7 @@ import (
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
@ -103,10 +104,7 @@ func (i *IndexNode) CreateJob(ctx context.Context, req *indexpb.CreateJobRequest
tr: timerecord.NewTimeRecorder(fmt.Sprintf("IndexBuildID: %d, ClusterID: %s", req.BuildID, req.ClusterID)),
serializedSize: 0,
}
ret := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}
ret := merr.Status(nil)
if err := i.sched.IndexBuildQueue.Enqueue(task); err != nil {
log.Ctx(ctx).Warn("IndexNode failed to schedule", zap.Int64("IndexBuildID", req.BuildID), zap.String("ClusterID", req.ClusterID), zap.Error(err))
ret.ErrorCode = commonpb.ErrorCode_UnexpectedError
@ -146,10 +144,7 @@ func (i *IndexNode) QueryJobs(ctx context.Context, req *indexpb.QueryJobsRequest
}
})
ret := &indexpb.QueryJobsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
ClusterID: req.ClusterID,
IndexInfos: make([]*indexpb.IndexTaskInfo, 0, len(req.BuildIDs)),
}
@ -196,10 +191,7 @@ func (i *IndexNode) DropJobs(ctx context.Context, req *indexpb.DropJobsRequest)
}
log.Ctx(ctx).Info("drop index build jobs success", zap.String("ClusterID", req.ClusterID),
zap.Int64s("IndexBuildIDs", req.BuildIDs))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
return merr.Status(nil), nil
}
func (i *IndexNode) GetJobStats(ctx context.Context, req *indexpb.GetJobStatsRequest) (*indexpb.GetJobStatsResponse, error) {
@ -227,10 +219,7 @@ func (i *IndexNode) GetJobStats(ctx context.Context, req *indexpb.GetJobStatsReq
}
log.Ctx(ctx).Info("Get Index Job Stats", zap.Int("Unissued", unissued), zap.Int("Active", active), zap.Int("Slot", slots))
return &indexpb.GetJobStatsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
TotalJobNum: int64(active) + int64(unissued),
InProgressJobNum: int64(active),
EnqueueJobNum: int64(unissued),

View File

@ -22,6 +22,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/pkg/util/hardware"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
@ -73,10 +74,7 @@ func getSystemInfoMetrics(
}
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.IndexNodeRole, paramtable.GetNodeID()),
}, nil

View File

@ -19,6 +19,7 @@ import (
"github.com/milvus-io/milvus/pkg/util/crypto"
"github.com/milvus-io/milvus/pkg/util/etcd"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
"go.uber.org/zap"
)
@ -189,7 +190,7 @@ func (kc *Catalog) loadCollectionFromDb(ctx context.Context, dbID int64, collect
collKey := BuildCollectionKey(dbID, collectionID)
collVal, err := kc.Snapshot.Load(collKey, ts)
if err != nil {
- return nil, common.NewCollectionNotExistError(fmt.Sprintf("collection not found: %d, error: %s", collectionID, err.Error()))
+ return nil, merr.WrapErrCollectionNotFound(collectionID, err.Error())
}
collMeta := &pb.CollectionInfo{}
@ -592,7 +593,7 @@ func (kc *Catalog) GetCollectionByName(ctx context.Context, dbID int64, collecti
}
}
- return nil, common.NewCollectionNotExistError(fmt.Sprintf("can't find collection %d:%s, at timestamp = %d", dbID, collectionName, ts))
+ return nil, merr.WrapErrCollectionNotFoundWithDB(dbID, collectionName, fmt.Sprintf("timestamp = %d", ts))
}
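
The catalog errors above use the merr constructors rather than free-form fmt strings: WrapErrCollectionNotFound attaches context while keeping the error matchable. A hedged sketch of a consumer, where load stands in for Catalog.GetCollectionByName:

    import (
        "github.com/cockroachdb/errors"

        "github.com/milvus-io/milvus/pkg/util/merr"
    )

    func loadOrSkip(load func() error) error {
        err := load()
        if errors.Is(err, merr.ErrCollectionNotFound) {
            // the "timestamp = ..." context travels in err.Error();
            // the sentinel stays matchable through the wrapping
            return nil
        }
        return err
    }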
func (kc *Catalog) ListCollections(ctx context.Context, dbID int64, ts typeutil.Timestamp) ([]*model.Collection, error) {

View File

@ -8,6 +8,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -40,9 +41,7 @@ func (r *defaultLimitReducer) afterReduce(result *milvuspb.QueryResults) error {
result.CollectionName = collectionName
if len(result.FieldsData) > 0 {
result.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
result.Status = merr.Status(nil)
} else {
result.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_EmptyCollection,

File diff suppressed because it is too large.

View File

@ -18,6 +18,7 @@ package proxy
import (
"context"
"github.com/cockroachdb/errors"
"github.com/samber/lo"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
@ -167,7 +168,7 @@ func (lb *LBPolicyImpl) ExecuteWithRetry(ctx context.Context, workload ChannelWo
// cancel work load which assign to the target node
lb.balancer.CancelWorkload(targetNode, workload.nq)
- return merr.WrapErrShardDelegatorAccessFailed(workload.channel, err.Error())
+ return errors.Wrapf(err, "failed to get delegator %d for channel %s", targetNode, workload.channel)
}
err = workload.exec(ctx, targetNode, client, workload.channel)
@ -178,11 +179,7 @@ func (lb *LBPolicyImpl) ExecuteWithRetry(ctx context.Context, workload ChannelWo
excludeNodes.Insert(targetNode)
lb.balancer.CancelWorkload(targetNode, workload.nq)
- if err == context.Canceled || err == context.DeadlineExceeded {
- return merr.WrapErrShardDelegatorSQTimeout(workload.channel, err.Error())
- }
- return merr.WrapErrShardDelegatorSQFailed(workload.channel, err.Error())
+ return errors.Wrapf(err, "failed to search/query delegator %d for channel %s", targetNode, workload.channel)
}
lb.balancer.CancelWorkload(targetNode, workload.nq)
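
Rather than minting a dedicated merr wrapper for every failure site, the retry path now uses cockroachdb/errors.Wrapf, which keeps the original cause intact for errors.Is/As; the test in the next file relies on that to detect cancellation. A small sketch under that assumption, with callDelegator standing in for workload.exec:

    import (
        "context"

        "github.com/cockroachdb/errors"
    )

    func callDelegator(ctx context.Context, do func(context.Context) error) error {
        if err := do(ctx); err != nil {
            // Wrapf preserves the cause, so errors.Is(err, context.Canceled)
            // and merr.IsCanceledOrTimeout(err) still see through the wrapper
            return errors.Wrapf(err, "failed to search/query delegator")
        }
        return nil
    }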

View File

@ -360,7 +360,7 @@ func (s *LBPolicySuite) TestExecuteWithRetry() {
},
retryTimes: 2,
})
- s.ErrorIs(err, merr.ErrShardDelegatorSQTimeout)
+ s.True(merr.IsCanceledOrTimeout(err))
}
func (s *LBPolicySuite) TestExecute() {

View File

@ -579,8 +579,9 @@ func (m *MetaCache) describeCollection(ctx context.Context, database, collection
if err != nil {
return nil, err
}
- if coll.Status.ErrorCode != commonpb.ErrorCode_Success {
- return nil, common.NewStatusError(coll.GetStatus().GetErrorCode(), coll.GetStatus().GetReason())
+ err = merr.Error(coll.GetStatus())
+ if err != nil {
+ return nil, err
}
resp := &milvuspb.DescribeCollectionResponse{
Status: coll.Status,

View File

@ -40,6 +40,7 @@ import (
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/util/crypto"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -155,12 +156,9 @@ func (m *MockRootCoordClientInterface) DescribeCollection(ctx context.Context, i
}, nil
}
- err := fmt.Errorf("can't find collection: " + in.CollectionName)
+ err := merr.WrapErrCollectionNotFound(in.CollectionName)
return &milvuspb.DescribeCollectionResponse{
- Status: &commonpb.Status{
- ErrorCode: commonpb.ErrorCode_CollectionNotExists,
- Reason: "describe collection failed: " + err.Error(),
- },
+ Status: merr.Status(err),
Schema: nil,
}, nil
}

View File

@ -24,6 +24,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/pkg/util/hardware"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/ratelimitutil"
@ -107,9 +108,7 @@ func getProxyMetrics(ctx context.Context, request *milvuspb.GetMetricsRequest, n
}
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID()),
}, nil
@ -429,10 +428,7 @@ func getSystemInfoMetrics(
}
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.ProxyRole, paramtable.GetNodeID()),
}, nil

View File

@ -503,9 +503,7 @@ func (dct *describeCollectionTask) PreExecute(ctx context.Context) error {
func (dct *describeCollectionTask) Execute(ctx context.Context) error {
var err error
dct.result = &milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
Schema: &schemapb.CollectionSchema{
Name: "",
Description: "",
@ -526,6 +524,13 @@ func (dct *describeCollectionTask) Execute(ctx context.Context) error {
if result.Status.ErrorCode != commonpb.ErrorCode_Success {
dct.result.Status = result.Status
+ // compatibility with PyMilvus existing implementation
+ err := merr.Error(dct.result.GetStatus())
+ if errors.Is(err, merr.ErrCollectionNotFound) {
+ dct.result.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
+ dct.result.Status.Reason = "can't find collection " + dct.result.Status.Reason
+ }
} else {
dct.result.Schema.Name = result.Schema.Name
dct.result.Schema.Description = result.Schema.Description
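
The added block above is a compatibility shim: older PyMilvus releases recognize a missing collection by ErrorCode_UnexpectedError plus a reason starting with "can't find collection", so DescribeCollection rewrites the new not-found status into that legacy shape. A hypothetical client-side check tolerating both encodings (the helper name and the strings.Contains heuristic are assumptions, not SDK code):

    import (
        "strings"

        "github.com/cockroachdb/errors"
        "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
        "github.com/milvus-io/milvus/pkg/util/merr"
    )

    func isCollectionNotFound(status *commonpb.Status) bool {
        if errors.Is(merr.Error(status), merr.ErrCollectionNotFound) {
            return true // new-style encoded error
        }
        // legacy shape kept for old PyMilvus clients
        return status.GetErrorCode() == commonpb.ErrorCode_UnexpectedError &&
            strings.Contains(status.GetReason(), "can't find collection")
    }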
@ -1306,10 +1311,7 @@ func (ft *flushTask) Execute(ctx context.Context) error {
coll2SealTimes[collName] = resp.GetTimeOfSeal()
}
ft.result = &milvuspb.FlushResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
DbName: ft.GetDbName(),
CollSegIDs: coll2Segments,
FlushCollSegIDs: flushColl2Segments,
@ -2218,7 +2220,7 @@ func (t *DescribeResourceGroupTask) Execute(ctx context.Context) error {
zap.Error(err))
// if collection has been dropped, skip it
- if common.IsCollectionNotExistError(err) {
+ if errors.Is(err, merr.ErrCollectionNotFound) {
continue
}
return nil, err

View File

@ -20,6 +20,7 @@ import (
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/typeutil"
@ -154,9 +155,7 @@ func (dt *deleteTask) PreExecute(ctx context.Context) error {
dt.deleteMsg.Base.SourceID = paramtable.GetNodeID()
dt.result = &milvuspb.MutationResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
IDs: &schemapb.IDs{
IdField: nil,
},

View File

@ -915,10 +915,7 @@ func (gist *getIndexStateTask) Execute(ctx context.Context) error {
}
gist.result = &milvuspb.GetIndexStateResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
State: state.GetState(),
FailReason: state.GetFailReason(),
}

View File

@ -15,6 +15,7 @@ import (
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/typeutil"
@ -97,9 +98,7 @@ func (it *insertTask) PreExecute(ctx context.Context) error {
defer sp.End()
it.result = &milvuspb.MutationResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
IDs: &schemapb.IDs{
IdField: nil,
},

View File

@ -24,7 +24,6 @@ import (
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/tsoutil"
@ -415,7 +414,7 @@ func (t *queryTask) Execute(ctx context.Context) error {
})
if err != nil {
log.Warn("fail to execute query", zap.Error(err))
- return merr.WrapErrShardDelegatorQueryFailed(err.Error())
+ return errors.Wrap(err, "failed to query")
}
log.Debug("Query Execute done.")

View File

@ -420,7 +420,7 @@ func (t *searchTask) Execute(ctx context.Context) error {
})
if err != nil {
log.Warn("search execute failed", zap.Error(err))
- return merr.WrapErrShardDelegatorSearchFailed(err.Error())
+ return errors.Wrap(err, "failed to search")
}
log.Debug("Search Execute done.",
@ -770,9 +770,7 @@ func reduceSearchResultData(ctx context.Context, subSearchResultData []*schemapb
zap.String("metricType", metricType))
ret := &milvuspb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
Results: &schemapb.SearchResultData{
NumQueries: nq,
TopK: topk,

View File

@ -216,7 +216,7 @@ func (g *getStatisticsTask) PostExecute(ctx context.Context) error {
return err
}
g.result = &milvuspb.GetStatisticsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
Stats: result,
}
@ -248,7 +248,7 @@ func (g *getStatisticsTask) getStatisticsFromDataCoord(ctx context.Context) erro
g.resultBuf = typeutil.NewConcurrentSet[*internalpb.GetStatisticsResponse]()
}
g.resultBuf.Insert(&internalpb.GetStatisticsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
Stats: result.Stats,
})
return nil
@ -268,7 +268,7 @@ func (g *getStatisticsTask) getStatisticsFromQueryNode(ctx context.Context) erro
})
if err != nil {
- return merr.WrapErrShardDelegatorStatisticFailed(err.Error())
+ return errors.Wrap(err, "failed to get statistics")
}
return nil
@ -466,7 +466,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return errors.New(result.Status.Reason)
// }
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
// Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
// Status: merr.Status(nil),
// Stats: result.Stats,
// })
// log.Debug("get partition statistics from DataCoord execute done", zap.Int64("msgID", g.ID()))
@ -481,7 +481,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return err
// }
// g.result = &milvuspb.GetPartitionStatisticsResponse{
// Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
// Status: merr.Status(nil),
// Stats: g.innerResult,
// }
// return nil
@ -538,7 +538,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return errors.New(result.Status.Reason)
// }
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
// Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
// Status: merr.Status(nil),
// Stats: result.Stats,
// })
// } else { // some partitions have been loaded, get some partition statistics from datacoord
@ -561,7 +561,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return errors.New(result.Status.Reason)
// }
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
// Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
// Status: merr.Status(nil),
// Stats: result.Stats,
// })
// }
@ -577,7 +577,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return err
// }
// g.result = &milvuspb.GetCollectionStatisticsResponse{
// Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
// Status: merr.Status(nil),
// Stats: g.innerResult,
// }
// return nil
@ -660,11 +660,8 @@ func (g *getCollectionStatisticsTask) Execute(ctx context.Context) error {
return errors.New(result.Status.Reason)
}
g.result = &milvuspb.GetCollectionStatisticsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Stats: result.Stats,
Status: merr.Status(nil),
Stats: result.Stats,
}
return nil
}
@ -753,11 +750,8 @@ func (g *getPartitionStatisticsTask) Execute(ctx context.Context) error {
return errors.New(result.Status.Reason)
}
g.result = &milvuspb.GetPartitionStatisticsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Stats: result.Stats,
Status: merr.Status(nil),
Stats: result.Stats,
}
return nil
}

View File

@ -1273,7 +1273,7 @@ func TestDropPartitionTask(t *testing.T) {
PartitionIDs: []int64{},
}, nil)
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
}, nil)
mockCache := NewMockCache(t)
@ -2554,7 +2554,7 @@ func Test_dropCollectionTask_Execute(t *testing.T) {
case "c1":
return errors.New("error mock DropCollection")
case "c2":
- return common.NewStatusError(commonpb.ErrorCode_CollectionNotExists, "collection not exist")
+ return merr.WrapErrCollectionNotFound("mock")
default:
return nil
}
@ -2594,7 +2594,7 @@ func Test_loadCollectionTask_Execute(t *testing.T) {
PartitionIDs: []int64{},
}, nil)
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
}, nil)
dbName := funcutil.GenRandomStr()
@ -2702,7 +2702,7 @@ func Test_loadPartitionTask_Execute(t *testing.T) {
PartitionIDs: []int64{},
}, nil)
qc.EXPECT().ShowCollections(mock.Anything, mock.Anything).Return(&querypb.ShowCollectionsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
}, nil)
dbName := funcutil.GenRandomStr()
@ -2802,7 +2802,7 @@ func TestCreateResourceGroupTask(t *testing.T) {
rc.Start()
defer rc.Stop()
qc := getQueryCoord()
- qc.EXPECT().CreateResourceGroup(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
+ qc.EXPECT().CreateResourceGroup(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
qc.Start()
defer qc.Stop()
ctx := context.Background()
@ -2842,7 +2842,7 @@ func TestDropResourceGroupTask(t *testing.T) {
rc.Start()
defer rc.Stop()
qc := getQueryCoord()
- qc.EXPECT().DropResourceGroup(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
+ qc.EXPECT().DropResourceGroup(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
qc.Start()
defer qc.Stop()
ctx := context.Background()
@ -2882,7 +2882,7 @@ func TestTransferNodeTask(t *testing.T) {
rc.Start()
defer rc.Stop()
qc := getQueryCoord()
- qc.EXPECT().TransferNode(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
+ qc.EXPECT().TransferNode(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
qc.Start()
defer qc.Stop()
ctx := context.Background()
@ -2922,7 +2922,7 @@ func TestTransferNodeTask(t *testing.T) {
func TestTransferReplicaTask(t *testing.T) {
rc := &MockRootCoordClientInterface{}
qc := getQueryCoord()
- qc.EXPECT().TransferReplica(mock.Anything, mock.Anything).Return(&commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil)
+ qc.EXPECT().TransferReplica(mock.Anything, mock.Anything).Return(merr.Status(nil), nil)
qc.Start()
defer qc.Stop()
ctx := context.Background()
@ -2966,7 +2966,7 @@ func TestListResourceGroupsTask(t *testing.T) {
rc := &MockRootCoordClientInterface{}
qc := getQueryCoord()
qc.EXPECT().ListResourceGroups(mock.Anything, mock.Anything).Return(&milvuspb.ListResourceGroupsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
ResourceGroups: []string{meta.DefaultResourceGroupName, "rg"},
}, nil)
qc.Start()
@ -3009,7 +3009,7 @@ func TestDescribeResourceGroupTask(t *testing.T) {
rc := &MockRootCoordClientInterface{}
qc := getQueryCoord()
qc.EXPECT().DescribeResourceGroup(mock.Anything, mock.Anything).Return(&querypb.DescribeResourceGroupResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
ResourceGroup: &querypb.ResourceGroupInfo{
Name: "rg",
Capacity: 2,
@ -3105,7 +3105,7 @@ func TestDescribeResourceGroupTaskFailed(t *testing.T) {
qc.ExpectedCalls = nil
qc.EXPECT().Stop().Return(nil)
qc.EXPECT().DescribeResourceGroup(mock.Anything, mock.Anything).Return(&querypb.DescribeResourceGroupResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
ResourceGroup: &querypb.ResourceGroupInfo{
Name: "rg",
Capacity: 2,

View File

@ -34,6 +34,7 @@ import (
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/typeutil"
@ -275,9 +276,7 @@ func (it *upsertTask) PreExecute(ctx context.Context) error {
log := log.Ctx(ctx).With(zap.String("collectionName", collectionName))
it.result = &milvuspb.MutationResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
IDs: &schemapb.IDs{
IdField: nil,
},

View File

@ -742,26 +742,27 @@ func validateName(entity string, nameType string) error {
entity = strings.TrimSpace(entity)
if entity == "" {
- return fmt.Errorf("%s should not be empty", nameType)
+ return merr.WrapErrParameterInvalid("not empty", entity, nameType+" should be not empty")
}
- invalidMsg := fmt.Sprintf("invalid %s: %s. ", nameType, entity)
if len(entity) > Params.ProxyCfg.MaxNameLength.GetAsInt() {
- msg := invalidMsg + fmt.Sprintf("the length of %s must be less than ", nameType) + Params.ProxyCfg.MaxNameLength.GetValue() + " characters."
- return errors.New(msg)
+ return merr.WrapErrParameterInvalidRange(0,
+ Params.ProxyCfg.MaxNameLength.GetAsInt(),
+ len(entity),
+ fmt.Sprintf("the length of %s must be not greater than limit", nameType))
}
firstChar := entity[0]
if firstChar != '_' && !isAlpha(firstChar) {
- msg := invalidMsg + fmt.Sprintf("the first character of %s must be an underscore or letter.", nameType)
- return errors.New(msg)
+ return merr.WrapErrParameterInvalid('_',
+ firstChar,
+ fmt.Sprintf("the first character of %s must be an underscore or letter", nameType))
}
for i := 1; i < len(entity); i++ {
c := entity[i]
if c != '_' && c != '$' && !isAlpha(c) && !isNumber(c) {
- msg := invalidMsg + fmt.Sprintf("%s can only contain numbers, letters, dollars and underscores.", nameType)
- return errors.New(msg)
+ return merr.WrapErrParameterInvalidMsg("%s can only contain numbers, letters, dollars and underscores, found %c at %d", nameType, c, i)
}
}
return nil
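
validateName now reports every violation through the merr parameter-invalid family, so callers can branch on one sentinel instead of parsing messages. A usage sketch, assuming all three wrappers chain to merr.ErrParameterInvalid:

    import (
        "github.com/cockroachdb/errors"
        "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
        "github.com/milvus-io/milvus/pkg/util/merr"
    )

    // checkName sketches a caller of validateName above.
    func checkName(name string) (*commonpb.Status, error) {
        if err := validateName(name, "collection name"); err != nil {
            if errors.Is(err, merr.ErrParameterInvalid) {
                // a client mistake: report it in the response status, not as an RPC error
                return merr.Status(err), nil
            }
            return nil, err
        }
        return merr.Status(nil), nil
    }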
@ -1035,7 +1036,7 @@ func fillFieldsDataBySchema(schema *schemapb.CollectionSchema, insertMsg *msgstr
for _, data := range insertMsg.FieldsData {
fieldName := data.GetFieldName()
if dataNameSet.Contain(fieldName) {
- return merr.WrapErrParameterDuplicateFieldData(fieldName, "The FieldDatas parameter being passed contains duplicate data for a field.")
+ return merr.WrapErrParameterInvalidMsg("The FieldDatas parameter being passed contains duplicate data for field %s", fieldName)
}
dataNameSet.Insert(fieldName)
}

View File

@ -31,7 +31,6 @@ import (
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
"github.com/milvus-io/milvus/pkg/util/merr"
@ -79,14 +78,9 @@ func (broker *CoordinatorBroker) GetCollectionSchema(ctx context.Context, collec
return nil, err
}
- statusErr := common.NewStatusError(resp.Status.ErrorCode, resp.Status.Reason)
- if common.IsCollectionNotExistError(statusErr) {
- return nil, merr.WrapErrCollectionNotFound(collectionID)
- }
- if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- err = errors.New(resp.GetStatus().GetReason())
- log.Warn("failed to get collection schema", zap.Int64("collectionID", collectionID), zap.Error(err))
+ err = merr.Error(resp.GetStatus())
+ if err != nil {
+ log.Warn("failed to get collection schema", zap.Error(err))
return nil, err
}
return resp.GetSchema(), nil
@ -108,14 +102,9 @@ func (broker *CoordinatorBroker) GetPartitions(ctx context.Context, collectionID
return nil, err
}
- statusErr := common.NewStatusError(resp.Status.ErrorCode, resp.Status.Reason)
- if common.IsCollectionNotExistError(statusErr) {
- return nil, merr.WrapErrCollectionNotFound(collectionID)
- }
- if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- err = errors.New(resp.GetStatus().GetReason())
- log.Warn("showPartition failed", zap.Int64("collectionID", collectionID), zap.Error(err))
+ err = merr.Error(resp.GetStatus())
+ if err != nil {
+ log.Warn("failed to get partitions", zap.Error(err))
return nil, err
}

View File

@ -138,9 +138,7 @@ func TestCoordinatorBroker_GetPartitions(t *testing.T) {
t.Run("collection not exist", func(t *testing.T) {
rc := mocks.NewRootCoord(t)
rc.EXPECT().ShowPartitions(mock.Anything, mock.Anything).Return(&milvuspb.ShowPartitionsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_CollectionNotExists,
},
Status: merr.Status(merr.WrapErrCollectionNotFound("mock")),
}, nil)
ctx := context.Background()

View File

@ -28,12 +28,12 @@ import (
"go.uber.org/zap"
"google.golang.org/grpc"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/proto/querypb"
. "github.com/milvus-io/milvus/internal/querycoordv2/params"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -82,9 +82,7 @@ func (node *MockQueryNode) Start() error {
err = node.server.Serve(lis)
}()
successStatus := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
successStatus := merr.Status(nil)
node.EXPECT().GetDataDistribution(mock.Anything, mock.Anything).Return(&querypb.GetDataDistributionResponse{
Status: successStatus,
NodeID: node.ID,

View File

@ -52,6 +52,7 @@ import (
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
@ -510,30 +511,22 @@ func (s *Server) GetComponentStates(ctx context.Context) (*milvuspb.ComponentSta
}
return &milvuspb.ComponentStates{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
State: serviceComponentInfo,
Status: merr.Status(nil),
State: serviceComponentInfo,
//SubcomponentStates: subComponentInfos,
}, nil
}
func (s *Server) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
}, nil
}
func (s *Server) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Value: Params.CommonCfg.QueryCoordTimeTick.GetValue(),
Status: merr.Status(nil),
Value: Params.CommonCfg.QueryCoordTimeTick.GetValue(),
}, nil
}

View File

@ -727,10 +727,7 @@ func (s *Server) ShowConfigurations(ctx context.Context, req *internalpb.ShowCon
}
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Configuations: configList,
}, nil
}

View File

@ -251,7 +251,7 @@ func (sd *shardDelegator) applyDelete(ctx context.Context, nodeID int64, worker
log.Debug("delegator plan to applyDelete via worker")
err := retry.Do(ctx, func() error {
if sd.Stopped() {
- return retry.Unrecoverable(merr.WrapErrChannelUnsubscribing(sd.vchannelName))
+ return retry.Unrecoverable(merr.WrapErrChannelNotAvailable(sd.vchannelName, "channel is unsubscribing"))
}
err := worker.Delete(ctx, &querypb.DeleteRequest{

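applyDelete runs inside retry.Do; wrapping the error in retry.Unrecoverable tells the retry loop to stop immediately once the delegator is shutting down, since a stopped delegator will never succeed. A sketch of the shape, with the predicate and operation invented for illustration:

    import (
        "context"

        "github.com/milvus-io/milvus/pkg/util/merr"
        "github.com/milvus-io/milvus/pkg/util/retry"
    )

    func deleteWithRetry(ctx context.Context, stopped func() bool, do func() error) error {
        return retry.Do(ctx, func() error {
            if stopped() {
                // a stopped delegator can never succeed; abort the remaining attempts
                return retry.Unrecoverable(merr.WrapErrChannelNotAvailable("ch-1", "channel is unsubscribing"))
            }
            return do() // transient failures here are retried
        }, retry.Attempts(5))
    }
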
View File

@ -440,7 +440,7 @@ func segmentStatsResponse(segStats []segments.SegmentStats) *internalpb.GetStati
resultMap["row_count"] = strconv.FormatInt(totalRowNum, 10)
ret := &internalpb.GetStatisticsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
Stats: funcutil.Map2KeyValuePair(resultMap),
}
return ret
@ -479,7 +479,7 @@ func reduceStatisticResponse(results []*internalpb.GetStatisticsResponse) (*inte
}
ret := &internalpb.GetStatisticsResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
Stats: funcutil.Map2KeyValuePair(stringMap),
}
return ret, nil

View File

@ -29,6 +29,7 @@ import (
"github.com/milvus-io/milvus/internal/querynodev2/segments"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/hardware"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/ratelimitutil"
@ -210,10 +211,7 @@ func getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetricsRequest,
}
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.QueryNodeRole, paramtable.GetNodeID()),
}, nil

View File

@ -80,7 +80,7 @@ func (m *manager) Add(collectionID UniqueID, channel string) (Pipeline, error) {
// get shard delegator for adding growing segments in the pipeline
delegator, ok := m.delegators.Get(channel)
if !ok {
- return nil, merr.WrapErrShardDelegatorNotFound(channel)
+ return nil, merr.WrapErrChannelNotFound(channel, "delegator not found")
}
newPipeLine, err := NewPipeLine(collectionID, channel, m.dataManager, m.tSafeManager, m.dispatcher, delegator)

View File

@ -25,13 +25,13 @@ import (
"github.com/samber/lo"
"go.uber.org/zap"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/segcorepb"
typeutil2 "github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -222,9 +222,7 @@ func DecodeSearchResults(searchResults []*internalpb.SearchResults) ([]*schemapb
func EncodeSearchResultData(searchResultData *schemapb.SearchResultData, nq int64, topk int64, metricType string) (searchResults *internalpb.SearchResults, err error) {
searchResults = &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
NumQueries: nq,
TopK: topk,
MetricType: metricType,

View File

@ -58,9 +58,7 @@ import (
// GetComponentStates returns information about whether the node is healthy
func (node *QueryNode) GetComponentStates(ctx context.Context) (*milvuspb.ComponentStates, error) {
stats := &milvuspb.ComponentStates{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}
code := node.lifetime.GetState()
@ -82,11 +80,8 @@ func (node *QueryNode) GetComponentStates(ctx context.Context) (*milvuspb.Compon
// TimeTickChannel contains many time tick messages, which will be sent by query nodes
func (node *QueryNode) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Value: paramtable.Get().CommonCfg.QueryCoordTimeTick.GetValue(),
Status: merr.Status(nil),
Value: paramtable.Get().CommonCfg.QueryCoordTimeTick.GetValue(),
}, nil
}
@ -94,10 +89,7 @@ func (node *QueryNode) GetTimeTickChannel(ctx context.Context) (*milvuspb.String
// Statistics channel contains statistics infos of query nodes, such as segment infos, memory infos
func (node *QueryNode) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
}, nil
}
@ -132,9 +124,7 @@ func (node *QueryNode) GetStatistics(ctx context.Context, req *querypb.GetStatis
}, nil
}
failRet := &internalpb.GetStatisticsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}
var toReduceResults []*internalpb.GetStatisticsResponse
@ -256,8 +246,8 @@ func (node *QueryNode) WatchDmChannels(ctx context.Context, req *querypb.WatchDm
// to avoid concurrent watch/unwatch
if node.unsubscribingChannels.Contain(channel.GetChannelName()) {
- err := merr.WrapErrChannelUnsubscribing(channel.GetChannelName())
- log.Warn("abort watch unsubscribing channel", zap.Error(err))
+ err := merr.WrapErrChannelReduplicate(channel.GetChannelName(), "the other same channel is unsubscribing")
+ log.Warn("failed to unsubscribe channel", zap.Error(err))
return merr.Status(err), nil
}
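
This file shows the handler-side half of the convention: gRPC handlers return merr.Status(err) with a nil Go error, so failures travel inside the response message. A minimal sketch, not an actual QueryNode method:

    import (
        "context"

        "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
        "github.com/milvus-io/milvus/pkg/util/merr"
    )

    func handle(ctx context.Context, process func() error) (*commonpb.Status, error) {
        if err := process(); err != nil { // hypothetical worker call
            // transport the typed error inside the Status; keep the Go error nil
            return merr.Status(err), nil
        }
        return merr.Status(nil), nil
    }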
@ -680,10 +670,8 @@ func (node *QueryNode) GetSegmentInfo(ctx context.Context, in *querypb.GetSegmen
}
return &querypb.GetSegmentInfoResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Infos: segmentInfos,
Status: merr.Status(nil),
Infos: segmentInfos,
}, nil
}
@ -798,9 +786,7 @@ func (node *QueryNode) Search(ctx context.Context, req *querypb.SearchRequest) (
}
failRet := &internalpb.SearchResults{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Status: merr.Status(nil),
}
collection := node.manager.Collection.Get(req.GetReq().GetCollectionID())
if collection == nil {
@ -1075,10 +1061,7 @@ func (node *QueryNode) ShowConfigurations(ctx context.Context, req *internalpb.S
}
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Configuations: configList,
}, nil
}
@ -1239,7 +1222,7 @@ func (node *QueryNode) GetDataDistribution(ctx context.Context, req *querypb.Get
})
return &querypb.GetDataDistributionResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
NodeID: paramtable.GetNodeID(),
Segments: segmentVersionInfos,
Channels: channelVersionInfos,
@ -1346,10 +1329,7 @@ func (node *QueryNode) SyncDistribution(ctx context.Context, req *querypb.SyncDi
}, true)
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
return merr.Status(nil), nil
}
// Delete is used to forward delete message between delegator and workers.
@ -1406,8 +1386,5 @@ func (node *QueryNode) Delete(ctx context.Context, req *querypb.DeleteRequest) (
}
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
return merr.Status(nil), nil
}

View File

@ -377,7 +377,7 @@ func (suite *ServiceSuite) TestWatchDmChannels_Failed() {
suite.node.unsubscribingChannels.Insert(suite.vchannel)
status, err := suite.node.WatchDmChannels(ctx, req)
suite.NoError(err)
- suite.Equal(status.GetReason(), merr.WrapErrChannelUnsubscribing(suite.vchannel).Error())
+ suite.ErrorIs(merr.Error(status), merr.ErrChannelReduplicate)
suite.node.unsubscribingChannels.Remove(suite.vchannel)
// init msgstream failed

View File

@ -11,6 +11,7 @@ import (
"github.com/milvus-io/milvus/internal/querynodev2/collector"
"github.com/milvus-io/milvus/internal/querynodev2/segments"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
@ -130,7 +131,7 @@ func (t *QueryTask) Execute() error {
Base: &commonpb.MsgBase{
SourceID: paramtable.GetNodeID(),
},
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
Ids: reducedResult.Ids,
FieldsData: reducedResult.FieldsData,
CostAggregation: &internalpb.CostAggregation{

View File

@ -20,6 +20,7 @@ import (
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/metrics"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/timerecord"
@ -166,7 +167,7 @@ func (t *SearchTask) Execute() error {
Base: &commonpb.MsgBase{
SourceID: paramtable.GetNodeID(),
},
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success},
Status: merr.Status(nil),
MetricType: req.GetReq().GetMetricType(),
NumQueries: t.originNqs[i],
TopK: t.originTopks[i],

View File

@ -22,11 +22,12 @@ import (
"go.uber.org/zap"
"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -55,7 +56,7 @@ func (t *dropCollectionTask) Execute(ctx context.Context) error {
// dropping collection with `ts1` but a collection exists in catalog with newer ts which is bigger than `ts1`.
// fortunately, if ddls are promised to execute in sequence, then everything is OK. The `ts1` will always be latest.
collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetDbName(), t.Req.GetCollectionName(), typeutil.MaxTimestamp)
- if common.IsCollectionNotExistError(err) {
+ if errors.Is(err, merr.ErrCollectionNotFound) {
// make dropping collection idempotent.
log.Warn("drop non-existent collection", zap.String("collection", t.Req.GetCollectionName()))
return nil
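
Treating not-found as success makes DropCollection idempotent: retries and concurrent drops converge on the same outcome. The pattern in miniature, with lookup standing in for meta.GetCollectionByName:

    import (
        "github.com/cockroachdb/errors"

        "github.com/milvus-io/milvus/pkg/util/merr"
    )

    func dropIfExists(lookup func() error, drop func() error) error {
        err := lookup()
        if errors.Is(err, merr.ErrCollectionNotFound) {
            return nil // already gone: a second drop is a no-op
        }
        if err != nil {
            return err
        }
        return drop()
    }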

View File

@ -29,8 +29,8 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/metastore/model"
mockrootcoord "github.com/milvus-io/milvus/internal/rootcoord/mocks"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/merr"
)
func Test_dropCollectionTask_Prepare(t *testing.T) {
@ -98,7 +98,7 @@ func Test_dropCollectionTask_Execute(t *testing.T) {
mock.Anything,
).Return(nil, func(ctx context.Context, dbName string, name string, ts Timestamp) error {
if collectionName == name {
- return common.NewCollectionNotExistError("collection not exist")
+ return merr.WrapErrCollectionNotFound(collectionName)
}
return errors.New("error mock GetCollectionByName")
})

View File

@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/util/importutil"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -436,10 +437,8 @@ func (m *importManager) importJob(ctx context.Context, req *milvuspb.ImportReque
}
resp := &milvuspb.ImportResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
},
Tasks: make([]int64, 0),
Status: merr.Status(nil),
Tasks: make([]int64, 0),
}
log.Info("receive import job",
@ -735,9 +734,7 @@ func (m *importManager) setCollectionPartitionName(dbName string, colID, partID
}
func (m *importManager) copyTaskInfo(input *datapb.ImportTaskInfo, output *milvuspb.GetImportStateResponse) {
output.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}
output.Status = merr.Status(nil)
output.Id = input.GetId()
output.CollectionId = input.GetCollectionId()

View File

@ -37,6 +37,7 @@ import (
"github.com/milvus-io/milvus/pkg/util"
"github.com/milvus-io/milvus/pkg/util/contextutil"
"github.com/milvus-io/milvus/pkg/util/funcutil"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/timerecord"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -493,13 +494,13 @@ func filterUnavailable(coll *model.Collection) *model.Collection {
func (mt *MetaTable) getLatestCollectionByIDInternal(ctx context.Context, collectionID UniqueID, allowAvailable bool) (*model.Collection, error) {
coll, ok := mt.collID2Meta[collectionID]
if !ok || coll == nil {
return nil, common.NewCollectionNotExistError(fmt.Sprintf("can't find collection: %d", collectionID))
return nil, merr.WrapErrCollectionNotFound(collectionID)
}
if allowAvailable {
return coll.Clone(), nil
}
if !coll.Available() {
return nil, common.NewCollectionNotExistError(fmt.Sprintf("can't find collection: %d", collectionID))
return nil, merr.WrapErrCollectionNotFound(collectionID)
}
return filterUnavailable(coll), nil
}
@ -527,7 +528,7 @@ func (mt *MetaTable) getCollectionByIDInternal(ctx context.Context, dbName strin
if coll == nil {
// use coll.Name to match error message of regression. TODO: remove this after error code is ready.
return nil, common.NewCollectionNotExistError(fmt.Sprintf("can't find collection: %d", collectionID))
return nil, merr.WrapErrCollectionNotFound(collectionID)
}
if allowUnavailable {
@ -536,7 +537,7 @@ func (mt *MetaTable) getCollectionByIDInternal(ctx context.Context, dbName strin
if !coll.Available() {
// use coll.Name to match error message of regression. TODO: remove this after error code is ready.
return nil, common.NewCollectionNotExistError(fmt.Sprintf("can't find collection %s:%s", dbName, coll.Name))
return nil, merr.WrapErrCollectionNotFound(dbName, coll.Name)
}
return filterUnavailable(coll), nil
@ -566,7 +567,7 @@ func (mt *MetaTable) getCollectionByNameInternal(ctx context.Context, dbName str
}
if isMaxTs(ts) {
return nil, common.NewCollectionNotExistError(fmt.Sprintf("can't find collection %s:%s", dbName, collectionName))
return nil, merr.WrapErrCollectionNotFoundWithDB(dbName, collectionName)
}
db, err := mt.getDatabaseByNameInternal(ctx, dbName, typeutil.MaxTimestamp)
@ -582,7 +583,7 @@ func (mt *MetaTable) getCollectionByNameInternal(ctx context.Context, dbName str
}
if coll == nil || !coll.Available() {
return nil, common.NewCollectionNotExistError(fmt.Sprintf("can't find collection %s:%s", dbName, collectionName))
return nil, merr.WrapErrCollectionNotFoundWithDB(dbName, collectionName)
}
return filterUnavailable(coll), nil
}
@ -742,7 +743,7 @@ func (mt *MetaTable) RenameCollection(ctx context.Context, dbName string, oldNam
log.Warn("check new collection fail")
return fmt.Errorf("duplicated new collection name %s:%s with other collection name or alias", newDBName, newName)
}
if err != nil && !common.IsCollectionNotExistErrorV2(err) {
if err != nil && !errors.Is(err, merr.ErrCollectionNotFound) {
log.Warn("check new collection name fail")
return err
}
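Since the metatable now returns merr-wrapped errors, RenameCollection can branch on the sentinel even after extra context has been layered on top. A small sketch assuming only the merr helpers shown in this diff; the wrapping messages are illustrative:

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/milvus-io/milvus/pkg/util/merr"
)

func main() {
	// Wrap the sentinel twice: once with db/collection context, once more
	// with a free-form message, as call sites in this commit do.
	err := merr.WrapErrCollectionNotFoundWithDB("default", "demo", "checked at max ts")
	err = errors.Wrap(err, "rename pre-check")

	// errors.Is walks the chain, so the match survives every layer.
	fmt.Println(errors.Is(err, merr.ErrCollectionNotFound)) // true
}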

View File

@ -34,8 +34,8 @@ import (
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
mocktso "github.com/milvus-io/milvus/internal/tso/mocks"
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/util"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -463,7 +463,7 @@ func TestMetaTable_getCollectionByIDInternal(t *testing.T) {
ctx := context.Background()
_, err := meta.getCollectionByIDInternal(ctx, util.DefaultDBName, 100, 101, false)
assert.Error(t, err)
assert.True(t, common.IsCollectionNotExistError(err))
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
coll, err := meta.getCollectionByIDInternal(ctx, util.DefaultDBName, 100, 101, true)
assert.NoError(t, err)
assert.False(t, coll.Available())
@ -602,7 +602,7 @@ func TestMetaTable_GetCollectionByName(t *testing.T) {
ctx := context.Background()
_, err := meta.GetCollectionByName(ctx, util.DefaultDBName, "name", 101)
assert.Error(t, err)
assert.True(t, common.IsCollectionNotExistError(err))
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
})
t.Run("normal case, filter unavailable partitions", func(t *testing.T) {
@ -642,7 +642,7 @@ func TestMetaTable_GetCollectionByName(t *testing.T) {
meta := &MetaTable{names: newNameDb(), aliases: newNameDb()}
_, err := meta.GetCollectionByName(ctx, "", "not_exist", typeutil.MaxTimestamp)
assert.Error(t, err)
assert.True(t, common.IsCollectionNotExistError(err))
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
})
}
@ -715,7 +715,7 @@ func TestMetaTable_getLatestCollectionByIDInternal(t *testing.T) {
mt := &MetaTable{collID2Meta: nil}
_, err := mt.getLatestCollectionByIDInternal(ctx, 100, false)
assert.Error(t, err)
assert.True(t, common.IsCollectionNotExistError(err))
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
})
t.Run("nil case", func(t *testing.T) {
@ -725,7 +725,7 @@ func TestMetaTable_getLatestCollectionByIDInternal(t *testing.T) {
}}
_, err := mt.getLatestCollectionByIDInternal(ctx, 100, false)
assert.Error(t, err)
assert.True(t, common.IsCollectionNotExistError(err))
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
})
t.Run("unavailable", func(t *testing.T) {
@ -735,7 +735,7 @@ func TestMetaTable_getLatestCollectionByIDInternal(t *testing.T) {
}}
_, err := mt.getLatestCollectionByIDInternal(ctx, 100, false)
assert.Error(t, err)
assert.True(t, common.IsCollectionNotExistError(err))
assert.ErrorIs(t, err, merr.ErrCollectionNotFound)
coll, err := mt.getLatestCollectionByIDInternal(ctx, 100, true)
assert.NoError(t, err)
assert.False(t, coll.Available())
@ -1258,7 +1258,7 @@ func TestMetaTable_RenameCollection(t *testing.T) {
mock.Anything,
mock.Anything,
mock.Anything,
).Return(nil, common.NewCollectionNotExistError("error"))
).Return(nil, merr.WrapErrCollectionNotFound("error"))
meta := &MetaTable{
dbName2Meta: map[string]*model.Database{
@ -1286,7 +1286,7 @@ func TestMetaTable_RenameCollection(t *testing.T) {
mock.Anything,
mock.Anything,
mock.Anything,
).Return(nil, common.NewCollectionNotExistError("error"))
).Return(nil, merr.WrapErrCollectionNotFound("error"))
meta := &MetaTable{
dbName2Meta: map[string]*model.Database{
util.DefaultDBName: model.NewDefaultDatabase(),
@ -1323,7 +1323,7 @@ func TestMetaTable_RenameCollection(t *testing.T) {
mock.Anything,
mock.Anything,
mock.Anything,
).Return(nil, common.NewCollectionNotExistError("error"))
).Return(nil, merr.WrapErrCollectionNotFound("error"))
meta := &MetaTable{
dbName2Meta: map[string]*model.Database{
util.DefaultDBName: model.NewDefaultDatabase(),

View File

@ -25,6 +25,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/hardware"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
"github.com/milvus-io/milvus/pkg/util/paramtable"
"github.com/milvus-io/milvus/pkg/util/typeutil"
@ -78,10 +79,7 @@ func (c *Core) getSystemInfoMetrics(ctx context.Context, req *milvuspb.GetMetric
}
return &milvuspb.GetMetricsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Response: resp,
ComponentName: metricsinfo.ConstructComponentName(typeutil.RootCoordRole, c.session.ServerID),
}, nil

View File

@ -1111,19 +1111,15 @@ func (c *Core) describeCollectionImpl(ctx context.Context, in *milvuspb.Describe
log.Info("failed to enqueue request to describe collection", zap.Error(err))
metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc()
return &milvuspb.DescribeCollectionResponse{
// TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
Status: merr.Status(err),
// Status: common.StatusFromError(err),
}, nil
}
if err := t.WaitToFinish(); err != nil {
log.Info("failed to describe collection", zap.Error(err))
log.Warn("failed to describe collection", zap.Error(err))
metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc()
return &milvuspb.DescribeCollectionResponse{
// TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
Status: merr.Status(err),
// Status: common.StatusFromError(err),
}, nil
}
@ -1581,10 +1577,7 @@ func (c *Core) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfi
}
return &internalpb.ShowConfigurationsResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
},
Status: merr.Status(nil),
Configuations: configList,
}, nil
}
@ -1988,9 +1981,7 @@ func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) (
}
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
return merr.Status(nil), nil
}
// ExpireCredCache will call invalidate credential cache

View File

@ -28,6 +28,7 @@ import (
"github.com/milvus-io/milvus/pkg/common"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/mq/msgstream"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/typeutil"
)
@ -114,10 +115,7 @@ func failStatus(code commonpb.ErrorCode, reason string) *commonpb.Status {
}
func succStatus() *commonpb.Status {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}
return merr.Status(nil)
}
type TimeTravelRequest interface {

View File

@ -457,5 +457,5 @@ func IsCrossClusterRoutingErr(err error) bool {
func IsServerIDMismatchErr(err error) bool {
// gRPC wraps errors in `status.Status`,
// so `errors.Is` cannot be used to match them across the RPC boundary.
return strings.Contains(err.Error(), merr.ErrServerIDMismatch.Error())
return strings.Contains(err.Error(), merr.ErrNodeNotMatch.Error())
}
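The comment above deserves a concrete illustration: once an error crosses a gRPC boundary it is flattened into a status message, so only its text survives the wire. A sketch of why the substring check is the fallback; the status conversion here is illustrative, not Milvus's actual interceptor code:

package main

import (
	"fmt"
	"strings"

	"github.com/cockroachdb/errors"
	"github.com/milvus-io/milvus/pkg/util/merr"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Server side: the typed error is serialized into a status string.
	wireErr := status.Error(codes.Unknown, merr.WrapErrNodeNotMatch(1, 2).Error())

	// Client side: the error chain did not survive the wire...
	fmt.Println(errors.Is(wireErr, merr.ErrNodeNotMatch)) // false
	// ...so IsServerIDMismatchErr falls back to message matching.
	fmt.Println(strings.Contains(wireErr.Error(), merr.ErrNodeNotMatch.Error())) // true
}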

View File

@ -27,6 +27,7 @@ import (
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/pkg/util/merr"
)
var _ rootcoordpb.RootCoordClient = &GrpcRootCoordClient{}
@ -48,7 +49,7 @@ func (m *GrpcRootCoordClient) ListDatabases(ctx context.Context, in *milvuspb.Li
}
func (m *GrpcRootCoordClient) RenameCollection(ctx context.Context, in *milvuspb.RenameCollectionRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}, nil
return merr.Status(nil), nil
}
func (m *GrpcRootCoordClient) CheckHealth(ctx context.Context, in *milvuspb.CheckHealthRequest, opts ...grpc.CallOption) (*milvuspb.CheckHealthResponse, error) {

View File

@ -18,10 +18,8 @@ package common
import (
"fmt"
"strings"
"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
)
var (
@ -76,68 +74,3 @@ type KeyNotExistError struct {
func (k *KeyNotExistError) Error() string {
return fmt.Sprintf("there is no value on key = %s", k.key)
}
type statusError struct {
commonpb.Status
}
func (e *statusError) Error() string {
return fmt.Sprintf("code: %s, reason: %s", e.GetErrorCode().String(), e.GetReason())
}
func NewStatusError(code commonpb.ErrorCode, reason string) *statusError {
return &statusError{Status: commonpb.Status{ErrorCode: code, Reason: reason}}
}
func IsStatusError(e error) bool {
_, ok := e.(*statusError)
return ok
}
var (
// static variable, save temporary memory.
collectionNotExistCodes = []commonpb.ErrorCode{
commonpb.ErrorCode_UnexpectedError, // TODO: remove this after SDK remove this dependency.
commonpb.ErrorCode_CollectionNotExists,
}
)
func NewCollectionNotExistError(msg string) *statusError {
return NewStatusError(commonpb.ErrorCode_CollectionNotExists, msg)
}
func IsCollectionNotExistError(e error) bool {
statusError, ok := e.(*statusError)
if !ok {
return false
}
// cycle import: common -> funcutil -> types -> sessionutil -> common
// return funcutil.SliceContain(collectionNotExistCodes, statusError.GetErrorCode())
if statusError.Status.ErrorCode == commonpb.ErrorCode_CollectionNotExists {
return true
}
if (statusError.Status.ErrorCode == commonpb.ErrorCode_UnexpectedError) && strings.Contains(statusError.Status.Reason, "can't find collection") {
return true
}
return false
}
func IsCollectionNotExistErrorV2(e error) bool {
statusError, ok := e.(*statusError)
if !ok {
return false
}
return statusError.GetErrorCode() == commonpb.ErrorCode_CollectionNotExists
}
func StatusFromError(e error) *commonpb.Status {
if e == nil {
return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
}
statusError, ok := e.(*statusError)
if !ok {
return &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError, Reason: e.Error()}
}
return &commonpb.Status{ErrorCode: statusError.GetErrorCode(), Reason: statusError.GetReason()}
}

View File

@ -17,11 +17,9 @@
package common
import (
"strings"
"testing"
"github.com/cockroachdb/errors"
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
"github.com/stretchr/testify/assert"
)
@ -37,45 +35,3 @@ func TestNotExistError(t *testing.T) {
assert.Equal(t, false, IsKeyNotExistError(err))
assert.Equal(t, true, IsKeyNotExistError(NewKeyNotExistError("foo")))
}
func TestStatusError_Error(t *testing.T) {
err := NewCollectionNotExistError("collection not exist")
assert.True(t, IsStatusError(err))
assert.True(t, strings.Contains(err.Error(), "collection not exist"))
}
func TestIsStatusError(t *testing.T) {
err := NewCollectionNotExistError("collection not exist")
assert.True(t, IsStatusError(err))
assert.False(t, IsStatusError(errors.New("not status error")))
assert.False(t, IsStatusError(nil))
}
func Test_IsCollectionNotExistError(t *testing.T) {
assert.False(t, IsCollectionNotExistError(nil))
assert.False(t, IsCollectionNotExistError(errors.New("not status error")))
for _, code := range collectionNotExistCodes {
err := NewStatusError(code, "can't find collection")
assert.True(t, IsCollectionNotExistError(err))
}
assert.True(t, IsCollectionNotExistError(NewCollectionNotExistError("collection not exist")))
assert.False(t, IsCollectionNotExistError(NewStatusError(commonpb.ErrorCode_BuildIndexError, "")))
}
func TestIsCollectionNotExistErrorV2(t *testing.T) {
assert.False(t, IsCollectionNotExistErrorV2(nil))
assert.False(t, IsCollectionNotExistErrorV2(errors.New("not status error")))
assert.True(t, IsCollectionNotExistErrorV2(NewCollectionNotExistError("collection not exist")))
assert.False(t, IsCollectionNotExistErrorV2(NewStatusError(commonpb.ErrorCode_BuildIndexError, "")))
}
func TestStatusFromError(t *testing.T) {
var status *commonpb.Status
status = StatusFromError(nil)
assert.Equal(t, commonpb.ErrorCode_Success, status.GetErrorCode())
status = StatusFromError(errors.New("not status error"))
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.GetErrorCode())
assert.Equal(t, "not status error", status.GetReason())
status = StatusFromError(NewCollectionNotExistError("collection not exist"))
assert.Equal(t, commonpb.ErrorCode_CollectionNotExists, status.GetErrorCode())
}

View File

@ -49,7 +49,7 @@ func ServerIDValidationUnaryServerInterceptor(fn GetServerIDFunc) grpc.UnaryServ
}
actualServerID := fn()
if serverID != actualServerID {
return nil, merr.WrapErrServerIDMismatch(serverID, actualServerID)
return nil, merr.WrapErrNodeNotMatch(serverID, actualServerID)
}
return handler(ctx, req)
}
@ -73,7 +73,7 @@ func ServerIDValidationStreamServerInterceptor(fn GetServerIDFunc) grpc.StreamSe
}
actualServerID := fn()
if serverID != actualServerID {
return merr.WrapErrServerIDMismatch(serverID, actualServerID)
return merr.WrapErrNodeNotMatch(serverID, actualServerID)
}
return handler(srv, ss)
}

View File

@ -101,7 +101,7 @@ func TestServerIDInterceptor(t *testing.T) {
md = metadata.Pairs(ServerIDKey, "1234")
ctx = metadata.NewIncomingContext(context.Background(), md)
_, err = interceptor(ctx, req, serverInfo, handler)
assert.ErrorIs(t, err, merr.ErrServerIDMismatch)
assert.ErrorIs(t, err, merr.ErrNodeNotMatch)
// with same ServerID
md = metadata.Pairs(ServerIDKey, fmt.Sprint(paramtable.GetNodeID()))
@ -137,7 +137,7 @@ func TestServerIDInterceptor(t *testing.T) {
md = metadata.Pairs(ServerIDKey, "1234")
ctx = metadata.NewIncomingContext(context.Background(), md)
err = interceptor(nil, newMockSS(ctx), nil, handler)
assert.ErrorIs(t, err, merr.ErrServerIDMismatch)
assert.ErrorIs(t, err, merr.ErrNodeNotMatch)
// with same ServerID
md = metadata.Pairs(ServerIDKey, fmt.Sprint(paramtable.GetNodeID()))

View File

@ -40,7 +40,6 @@ var (
ErrServiceInternal = newMilvusError("service internal error", 5, false) // Never return this error out of Milvus
ErrCrossClusterRouting = newMilvusError("cross cluster routing", 6, false)
ErrServiceDiskLimitExceeded = newMilvusError("disk limit exceeded", 7, false)
ErrServerIDMismatch = newMilvusError("server ID mismatch", 8, false)
// Collection related
ErrCollectionNotFound = newMilvusError("collection not found", 100, false)
@ -60,12 +59,11 @@ var (
ErrReplicaNotFound = newMilvusError("replica not found", 400, false)
ErrReplicaNotAvailable = newMilvusError("replica not available", 401, false)
// Channel related
ErrChannelNotFound = newMilvusError("channel not found", 500, false)
ErrChannelLack = newMilvusError("channel lacks", 501, false)
ErrChannelReduplicate = newMilvusError("channel reduplicates", 502, false)
ErrChannelNotAvailable = newMilvusError("channel not available", 503, false)
ErrChannelUnsubscribing = newMilvusError("channel is unsubscribing", 504, true)
// Channel & Delegator related
ErrChannelNotFound = newMilvusError("channel not found", 500, false)
ErrChannelLack = newMilvusError("channel lacks", 501, false)
ErrChannelReduplicate = newMilvusError("channel reduplicates", 502, false)
ErrChannelNotAvailable = newMilvusError("channel not available", 503, false)
// Segment related
ErrSegmentNotFound = newMilvusError("segment not found", 600, false)
@ -102,15 +100,6 @@ var (
ErrTopicNotFound = newMilvusError("topic not found", 1300, false)
ErrTopicNotEmpty = newMilvusError("topic not empty", 1301, false)
// shard delegator related
ErrShardDelegatorNotFound = newMilvusError("shard delegator not found", 1500, false)
ErrShardDelegatorAccessFailed = newMilvusError("fail to access shard delegator", 1501, true)
ErrShardDelegatorSearchFailed = newMilvusError("fail to search on all shard leaders", 1502, true)
ErrShardDelegatorQueryFailed = newMilvusError("fail to query on all shard leaders", 1503, true)
ErrShardDelegatorStatisticFailed = newMilvusError("get statistics on all shard leaders", 1504, true)
ErrShardDelegatorSQTimeout = newMilvusError("search/query on shard leader timeout", 1505, true)
ErrShardDelegatorSQFailed = newMilvusError("fail to search/query shard leader", 1506, true)
// field related
ErrFieldNotFound = newMilvusError("field not found", 1700, false)

View File

@ -77,7 +77,7 @@ func (s *ErrSuite) TestWrap() {
s.ErrorIs(WrapErrServiceInternal("never throw out"), ErrServiceInternal)
s.ErrorIs(WrapErrCrossClusterRouting("ins-0", "ins-1"), ErrCrossClusterRouting)
s.ErrorIs(WrapErrServiceDiskLimitExceeded(110, 100, "DLE"), ErrServiceDiskLimitExceeded)
s.ErrorIs(WrapErrServerIDMismatch(0, 1, "SIM"), ErrServerIDMismatch)
s.ErrorIs(WrapErrNodeNotMatch(0, 1, "SIM"), ErrNodeNotMatch)
// Collection related
s.ErrorIs(WrapErrCollectionNotFound("test_collection", "failed to get collection"), ErrCollectionNotFound)
@ -100,7 +100,6 @@ func (s *ErrSuite) TestWrap() {
s.ErrorIs(WrapErrChannelNotFound("test_Channel", "failed to get Channel"), ErrChannelNotFound)
s.ErrorIs(WrapErrChannelLack("test_Channel", "failed to get Channel"), ErrChannelLack)
s.ErrorIs(WrapErrChannelReduplicate("test_Channel", "failed to get Channel"), ErrChannelReduplicate)
s.ErrorIs(WrapErrChannelUnsubscribing("test_channel"), ErrChannelUnsubscribing)
// Segment related
s.ErrorIs(WrapErrSegmentNotFound(1, "failed to get Segment"), ErrSegmentNotFound)
@ -131,11 +130,6 @@ func (s *ErrSuite) TestWrap() {
s.ErrorIs(WrapErrTopicNotFound("unknown", "failed to get topic"), ErrTopicNotFound)
s.ErrorIs(WrapErrTopicNotEmpty("unknown", "topic is not empty"), ErrTopicNotEmpty)
// shard delegator related
s.ErrorIs(WrapErrShardDelegatorNotFound("unknown", "fail to get shard delegator"), ErrShardDelegatorNotFound)
s.ErrorIs(WrapErrShardDelegatorSQFailed("fake"), ErrShardDelegatorSQFailed)
s.ErrorIs(WrapErrShardDelegatorSQTimeout("fake"), ErrShardDelegatorSQTimeout)
// field related
s.ErrorIs(WrapErrFieldNotFound("meta", "failed to get field"), ErrFieldNotFound)
}

View File

@ -60,6 +60,10 @@ func IsRetriable(err error) bool {
return Code(err)&retriableFlag != 0
}
func IsCanceledOrTimeout(err error) bool {
return errors.IsAny(err, context.Canceled, context.DeadlineExceeded)
}
// Status returns a status according to the given err,
// returns Success status if err is nil
func Status(err error) *commonpb.Status {
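In practice this is the helper nearly every handler in this commit now calls in place of a hand-built commonpb.Status. A short sketch of both directions; the exact code/reason layout of the returned status is an assumption based on the doc comment above:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/pkg/util/merr"
)

func main() {
	// nil maps to a plain Success status.
	ok := merr.Status(nil)
	fmt.Println(ok.GetErrorCode()) // Success

	// A merr error maps to a status carrying its code and reason.
	bad := merr.Status(merr.WrapErrCollectionNotFound("demo"))
	fmt.Println(bad.GetErrorCode(), bad.GetReason())
}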
@ -196,14 +200,6 @@ func WrapErrServiceDiskLimitExceeded(predict, limit float32, msg ...string) erro
return err
}
func WrapErrServerIDMismatch(expectedID, actualID int64, msg ...string) error {
err := errors.Wrapf(ErrServerIDMismatch, "expected=%s, actual=%s", expectedID, actualID)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrDatabaseNotFound(database any, msg ...string) error {
err := wrapWithField(ErrDatabaseNotfound, "database", database)
if len(msg) > 0 {
@ -237,6 +233,14 @@ func WrapErrCollectionNotFound(collection any, msg ...string) error {
return err
}
func WrapErrCollectionNotFoundWithDB(db any, collection any, msg ...string) error {
err := errors.Wrapf(ErrCollectionNotFound, "collection %v:%v", db, collection)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrCollectionNotLoaded(collection any, msg ...string) error {
err := wrapWithField(ErrCollectionNotLoaded, "collection", collection)
if len(msg) > 0 {
@ -345,14 +349,6 @@ func WrapErrChannelNotAvailable(name string, msg ...string) error {
return err
}
func WrapErrChannelUnsubscribing(name string, msg ...string) error {
err := wrapWithField(ErrChannelUnsubscribing, "channel", name)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
// Segment related
func WrapErrSegmentNotFound(id int64, msg ...string) error {
err := wrapWithField(ErrSegmentNotFound, "segment", id)
@ -463,18 +459,15 @@ func WrapErrParameterInvalid[T any](expected, actual T, msg ...string) error {
}
func WrapErrParameterInvalidRange[T any](lower, upper, actual T, msg ...string) error {
err := errors.Wrapf(ErrParameterInvalid, "expected in (%v, %v), actual=%v", lower, upper, actual)
err := errors.Wrapf(ErrParameterInvalid, "expected in [%v, %v], actual=%v", lower, upper, actual)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrParameterDuplicateFieldData(fieldName string, msg ...string) error {
err := errors.Wrapf(ErrParameterInvalid, "field name=%v", fieldName)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
func WrapErrParameterInvalidMsg(fmt string, args ...any) error {
err := errors.Wrapf(ErrParameterInvalid, fmt, args...)
return err
}
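The replacement helper takes a printf-style message directly instead of a fixed field name. A hypothetical call site; the dimension values are made up, only the helper itself comes from this diff:

package main

import (
	"fmt"

	"github.com/cockroachdb/errors"
	"github.com/milvus-io/milvus/pkg/util/merr"
)

func main() {
	err := merr.WrapErrParameterInvalidMsg("dim %d exceeds the limit %d", 40000, 32768)
	fmt.Println(errors.Is(err, merr.ErrParameterInvalid)) // true
	fmt.Println(err)
}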
@ -504,63 +497,6 @@ func WrapErrTopicNotEmpty(name string, msg ...string) error {
return err
}
// shard delegator related
func WrapErrShardDelegatorNotFound(channel string, msg ...string) error {
err := errors.Wrapf(ErrShardDelegatorNotFound, "channel=%s", channel)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrShardDelegatorAccessFailed(channel string, msg ...string) error {
err := errors.Wrapf(ErrShardDelegatorAccessFailed, "channel=%s", channel)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrShardDelegatorSQTimeout(channel string, msg ...string) error {
err := errors.Wrapf(ErrShardDelegatorSQTimeout, "channel=%s", channel)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrShardDelegatorSQFailed(channel string, msg ...string) error {
err := errors.Wrapf(ErrShardDelegatorSQFailed, "channel=%s", channel)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrShardDelegatorSearchFailed(msg ...string) error {
err := error(ErrShardDelegatorSearchFailed)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrShardDelegatorQueryFailed(msg ...string) error {
err := error(ErrShardDelegatorQueryFailed)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
func WrapErrShardDelegatorStatisticFailed(msg ...string) error {
err := error(ErrShardDelegatorStatisticFailed)
if len(msg) > 0 {
err = errors.Wrap(err, strings.Join(msg, "; "))
}
return err
}
// field related
func WrapErrFieldNotFound[T any](field T, msg ...string) error {
err := errors.Wrapf(ErrFieldNotFound, "field=%v", field)

View File

@ -43,7 +43,7 @@ class TestAliasParamsInvalid(TestcaseBase):
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
error = {ct.err_code: 1, ct.err_msg: f"Invalid collection alias: {alias_name}"}
error = {ct.err_code: 1, ct.err_msg: "Invalid collection alias"}
self.utility_wrap.create_alias(collection_w.name, alias_name,
check_task=CheckTasks.err_res,
check_items=error)
@ -79,7 +79,8 @@ class TestAliasOperation(TestcaseBase):
check_items={exp_name: alias_name,
exp_schema: default_schema})
# assert collection is equal to alias according to partitions
assert [p.name for p in collection_w.partitions] == [p.name for p in collection_alias.partitions]
assert [p.name for p in collection_w.partitions] == [
p.name for p in collection_alias.partitions]
@pytest.mark.tags(CaseLabel.L1)
def test_alias_alter_operation_default(self):
@ -105,8 +106,8 @@ class TestAliasOperation(TestcaseBase):
partition_name = cf.gen_unique_str("partition")
# create partition with different names and check the partition exists
self.init_partition_wrap(collection_1, partition_name)
assert collection_1.has_partition(partition_name)[0]
alias_a_name = cf.gen_unique_str(prefix)
self.utility_wrap.create_alias(collection_1.name, alias_a_name)
collection_alias_a, _ = self.collection_wrap.init_collection(name=alias_a_name,
@ -114,8 +115,9 @@ class TestAliasOperation(TestcaseBase):
check_items={exp_name: alias_a_name,
exp_schema: default_schema})
# assert collection is equal to alias according to partitions
assert [p.name for p in collection_1.partitions] == [p.name for p in collection_alias_a.partitions]
assert [p.name for p in collection_1.partitions] == [
p.name for p in collection_alias_a.partitions]
# create collection_2 with 5 partitions and its alias alias_b
c_2_name = cf.gen_unique_str("collection")
collection_2 = self.init_collection_wrap(name=c_2_name, schema=default_schema,
@ -135,15 +137,19 @@ class TestAliasOperation(TestcaseBase):
check_items={exp_name: alias_b_name,
exp_schema: default_schema})
# assert collection is equal to alias according to partitions
assert [p.name for p in collection_2.partitions] == [p.name for p in collection_alias_b.partitions]
assert [p.name for p in collection_2.partitions] == [
p.name for p in collection_alias_b.partitions]
# collection_1 alter alias to alias_b
self.utility_wrap.alter_alias(collection_1.name, alias_b_name)
# collection_1 has two alias name, alias_a and alias_b, but collection_2 has no alias any more
assert [p.name for p in collection_1.partitions] == [p.name for p in collection_alias_b.partitions]
assert [p.name for p in collection_1.partitions] == [p.name for p in collection_alias_a.partitions]
assert [p.name for p in collection_2.partitions] != [p.name for p in collection_alias_b.partitions]
assert [p.name for p in collection_1.partitions] == [
p.name for p in collection_alias_b.partitions]
assert [p.name for p in collection_1.partitions] == [
p.name for p in collection_alias_a.partitions]
assert [p.name for p in collection_2.partitions] != [
p.name for p in collection_alias_b.partitions]
@pytest.mark.tags(CaseLabel.L1)
def test_alias_drop_operation_default(self):
@ -176,7 +182,8 @@ class TestAliasOperation(TestcaseBase):
check_items={exp_name: alias_name,
exp_schema: default_schema})
# assert collection is equal to alias according to partitions
assert [p.name for p in collection_w.partitions] == [p.name for p in collection_alias.partitions]
assert [p.name for p in collection_w.partitions] == [
p.name for p in collection_alias.partitions]
self.utility_wrap.drop_alias(alias_name)
error = {ct.err_code: 0,
ct.err_msg: f"Collection '{alias_name}' not exist, or you can pass in schema to create one"}
@ -216,7 +223,7 @@ class TestAliasOperation(TestcaseBase):
check_task=CheckTasks.check_collection_property,
check_items={exp_name: alias_name,
exp_schema: default_schema})
# create partition by alias
partition_name = cf.gen_unique_str("partition")
try:
@ -225,11 +232,11 @@ class TestAliasOperation(TestcaseBase):
log.info(f"alias create partition failed with exception {e}")
create_partition_flag = False
collection_w.create_partition(partition_name)
# assert partition
pytest.assume(create_partition_flag is True and
[p.name for p in collection_alias.partitions] == [p.name for p in collection_w.partitions])
# insert data by alias
df = cf.gen_default_dataframe_data(ct.default_nb)
try:
@ -238,26 +245,29 @@ class TestAliasOperation(TestcaseBase):
log.info(f"alias insert data failed with exception {e}")
insert_data_flag = False
collection_w.insert(data=df)
# assert insert data
pytest.assume(insert_data_flag is True and
collection_w.num_entities == ct.default_nb and
collection_alias.num_entities == ct.default_nb)
# create index by alias
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
default_index = {"index_type": "IVF_FLAT",
"params": {"nlist": 128}, "metric_type": "L2"}
try:
collection_alias.create_index(field_name="float_vector", index_params=default_index)
collection_alias.create_index(
field_name="float_vector", index_params=default_index)
except Exception as e:
log.info(f"alias create index failed with exception {e}")
create_index_flag = False
collection_w.create_index(field_name="float_vector", index_params=default_index)
collection_w.create_index(
field_name="float_vector", index_params=default_index)
# assert create index
pytest.assume(create_index_flag is True and
collection_alias.has_index() is True and
collection_w.has_index()[0] is True)
# load by alias
try:
collection_alias.load()
@ -271,8 +281,9 @@ class TestAliasOperation(TestcaseBase):
# search by alias
topK = 5
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
query = [[random.random() for _ in range(ct.default_dim)] for _ in range(1)]
query = [[random.random() for _ in range(ct.default_dim)]
for _ in range(1)]
alias_res = None
try:
alias_res = collection_alias.search(
@ -282,13 +293,14 @@ class TestAliasOperation(TestcaseBase):
except Exception as e:
log.info(f"alias search failed with exception {e}")
search_flag = False
collection_res, _ = collection_w.search(
query, "float_vector", search_params, topK,
"int64 >= 0", output_fields=["int64"]
)
# assert search
pytest.assume(search_flag is True and alias_res[0].ids == collection_res[0].ids)
pytest.assume(
search_flag is True and alias_res[0].ids == collection_res[0].ids)
# release by alias
try:
@ -314,7 +326,7 @@ class TestAliasOperation(TestcaseBase):
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
alias_name = cf.gen_unique_str(prefix)
self.utility_wrap.create_alias(collection_w.name, alias_name)
# collection_w.create_alias(alias_name)
@ -340,7 +352,7 @@ class TestAliasOperation(TestcaseBase):
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
alias_name = cf.gen_unique_str(prefix)
self.utility_wrap.create_alias(collection_w.name, alias_name)
# collection_w.create_alias(alias_name)
@ -349,7 +361,8 @@ class TestAliasOperation(TestcaseBase):
check_items={exp_name: alias_name,
exp_schema: default_schema})
assert self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: f"cannot drop the collection via alias = {alias_name}"}
error = {ct.err_code: 1,
ct.err_msg: f"cannot drop the collection via alias = {alias_name}"}
self.utility_wrap.drop_collection(alias_name,
check_task=CheckTasks.err_res,
check_items=error)
@ -372,7 +385,7 @@ class TestAliasOperation(TestcaseBase):
check_items={exp_name: c_name, exp_schema: default_schema})
partition_name = cf.gen_unique_str("partition")
self.init_partition_wrap(collection_w, partition_name)
alias_name = cf.gen_unique_str(prefix)
self.utility_wrap.create_alias(collection_w.name, alias_name)
# collection_w.create_alias(alias_name)
@ -411,7 +424,8 @@ class TestAliasOperationInvalid(TestcaseBase):
collection_2 = self.init_collection_wrap(name=c_2_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_2_name, exp_schema: default_schema})
error = {ct.err_code: 1, ct.err_msg: "Create alias failed: duplicate collection alias"}
error = {ct.err_code: 1,
ct.err_msg: "Create alias failed: duplicate collection alias"}
self.utility_wrap.create_alias(collection_2.name, alias_a_name,
check_task=CheckTasks.err_res,
check_items=error)
@ -439,7 +453,8 @@ class TestAliasOperationInvalid(TestcaseBase):
# collection_w.create_alias(alias_name)
alias_not_exist_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 1, ct.err_msg: "Alter alias failed: alias does not exist"}
error = {ct.err_code: 1,
ct.err_msg: "Alter alias failed: alias does not exist"}
self.utility_wrap.alter_alias(collection_w.name, alias_not_exist_name,
check_task=CheckTasks.err_res,
check_items=error)
@ -466,7 +481,8 @@ class TestAliasOperationInvalid(TestcaseBase):
# collection_w.create_alias(alias_name)
alias_not_exist_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 1, ct.err_msg: "Drop alias failed: alias does not exist"}
error = {ct.err_code: 1,
ct.err_msg: "Drop alias failed: alias does not exist"}
# self.utility_wrap.drop_alias(alias_not_exist_name,
# check_task=CheckTasks.err_res,
# check_items=error)
@ -510,7 +526,7 @@ class TestAliasOperationInvalid(TestcaseBase):
# collection_w.drop_alias(alias_name,
# check_task=CheckTasks.err_res,
# check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_alias_create_dup_name_collection(self):
"""
@ -556,6 +572,6 @@ class TestAliasOperationInvalid(TestcaseBase):
check_task=CheckTasks.check_collection_property,
check_items={exp_name: alias_name,
exp_schema: default_schema})
with pytest.raises(Exception):
collection_alias.drop()

View File

@ -1068,7 +1068,7 @@ class TestCollectionOperation(TestcaseBase):
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: can\'t find collection: {c_name}'}
error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: collection not found: {c_name}'}
collection_w.has_partition("p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@ -1895,7 +1895,7 @@ class TestDropCollection(TestcaseBase):
c_name = cf.gen_unique_str()
self.init_collection_wrap(name=c_name)
c_name_2 = cf.gen_unique_str()
# error = {ct.err_code: 0, ct.err_msg: 'DescribeCollection failed: can\'t find collection: %s' % c_name_2}
# error = {ct.err_code: 0, ct.err_msg: 'DescribeCollection failed: collection not found: %s' % c_name_2}
# self.utility_wrap.drop_collection(c_name_2, check_task=CheckTasks.err_res, check_items=error)
# @longjiquan: dropping collection should be idempotent.
self.utility_wrap.drop_collection(c_name_2)
@ -3360,7 +3360,7 @@ class TestLoadPartition(TestcaseBase):
"is_empty": True, "num_entities": 0}
)
collection_w.drop()
error = {ct.err_code: 0, ct.err_msg: "can\'t find collection"}
error = {ct.err_code: 0, ct.err_msg: "collection not found"}
partition_w.load(check_task=CheckTasks.err_res, check_items=error)
partition_w.release(check_task=CheckTasks.err_res, check_items=error)

View File

@ -177,8 +177,7 @@ class TestIndexParams(TestcaseBase):
index_name=index_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "CreateIndex failed: index already exist, "
"but parameters are inconsistent"})
ct.err_msg: "invalid parameter"})
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.xfail(reason="issue 19181")

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -551,9 +551,8 @@ class TestUtilityParams(TestcaseBase):
new_collection_name = cf.gen_unique_str(prefix)
self.utility_wrap.rename_collection(old_collection_name, new_collection_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection {} was not "
"loaded into memory)".format(collection_w.name)})
check_items={"err_code": 4,
"err_msg": "collection not found"})
@pytest.mark.tags(CaseLabel.L2)
def test_rename_collection_new_invalid_type(self, get_invalid_type_collection_name):