Fix panic when handling nil status (#27040)

Signed-off-by: yah01 <yah2er0ne@outlook.com>
yah01 2023-09-15 10:09:21 +08:00 committed by GitHub
parent 22d7fa4e1c
commit 168e82ee10
41 changed files with 555 additions and 550 deletions


@ -154,7 +154,7 @@ func (gc *garbageCollector) scan() {
if err != nil {
log.Error("failed to list files with prefix",
zap.String("prefix", prefix),
- zap.String("error", err.Error()),
+ zap.Error(err),
)
}
log.Info("gc scan finish list object", zap.String("prefix", prefix), zap.Duration("time spent", time.Since(startTs)), zap.Int("keys", len(infoKeys)))
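
The hunk above swaps zap.String("error", err.Error()) for zap.Error(err). A minimal sketch of the difference, assuming nothing beyond the zap package itself: zap.Error attaches the error under the conventional "error" key and degrades to a no-op field when the error is nil, so callers never have to stringify (or nil-check) first.

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	err := errors.New("failed to list files with prefix")

	// Old style: manual stringification; err.Error() panics if err is nil.
	log.Error("scan failed", zap.String("error", err.Error()))

	// New style: logs under the "error" key; a nil error becomes a no-op
	// field instead of a panic.
	var nilErr error
	log.Error("scan failed", zap.Error(err), zap.Error(nilErr))
}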


@ -168,8 +168,8 @@ func (s *Server) getDataNodeMetrics(ctx context.Context, req *milvuspb.GetMetric
if metrics.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("invalid metrics of DataNode was found",
- zap.Any("error_code", metrics.Status.ErrorCode),
- zap.Any("error_reason", metrics.Status.Reason))
+ zap.Any("error_code", metrics.GetStatus().GetErrorCode()),
+ zap.Any("error_reason", metrics.GetStatus().GetReason()))
infos.BaseComponentInfos.ErrorReason = metrics.GetStatus().GetReason()
return infos, nil
}
@ -208,8 +208,8 @@ func (s *Server) getIndexNodeMetrics(ctx context.Context, req *milvuspb.GetMetri
if metrics.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("invalid metrics of DataNode was found",
- zap.Any("error_code", metrics.Status.ErrorCode),
- zap.Any("error_reason", metrics.Status.Reason))
+ zap.Any("error_code", metrics.GetStatus().GetErrorCode()),
+ zap.Any("error_reason", metrics.GetStatus().GetReason()))
infos.BaseComponentInfos.ErrorReason = metrics.GetStatus().GetReason()
return infos, nil
}
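
Most of this commit is the mechanical change visible above: direct field access (metrics.Status.ErrorCode) becomes the generated accessor chain (metrics.GetStatus().GetErrorCode()). protoc-gen-go emits getters that check for a nil receiver, so the chain returns zero values instead of panicking when the response or its Status is nil. A hand-written sketch of that generated pattern, with the types simplified (the real ones live in commonpb/milvuspb):

package main

import "fmt"

type ErrorCode int32

const ErrorCode_Success ErrorCode = 0 // zero value, as in commonpb

// Status mirrors the shape of commonpb.Status; the getters follow the
// nil-receiver pattern protoc-gen-go generates.
type Status struct {
	ErrorCode ErrorCode
	Reason    string
}

func (s *Status) GetErrorCode() ErrorCode {
	if s != nil {
		return s.ErrorCode
	}
	return ErrorCode_Success // a nil status reads as the zero value
}

func (s *Status) GetReason() string {
	if s != nil {
		return s.Reason
	}
	return ""
}

type GetMetricsResponse struct {
	Status *Status
}

func (r *GetMetricsResponse) GetStatus() *Status {
	if r != nil {
		return r.Status
	}
	return nil
}

func main() {
	var resp *GetMetricsResponse // nil response, hence nil Status

	// resp.Status.ErrorCode would panic here; the getter chain is safe
	// end to end and evaluates to the Success zero value.
	fmt.Println(resp.GetStatus().GetErrorCode() != ErrorCode_Success) // false
}

Note the trade-off: because ErrorCode_Success is the zero value, a nil status is indistinguishable from an explicit success.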


@ -96,7 +96,7 @@ func TestGetSegmentInfoChannel(t *testing.T) {
t.Run("get segment info channel", func(t *testing.T) {
resp, err := svr.GetSegmentInfoChannel(context.TODO())
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, Params.CommonCfg.DataCoordSegmentInfo.GetValue(), resp.Value)
})
}
@ -131,7 +131,7 @@ func TestAssignSegmentID(t *testing.T) {
assert.NoError(t, err)
assert.EqualValues(t, 1, len(resp.SegIDAssignments))
assign := resp.SegIDAssignments[0]
- assert.EqualValues(t, commonpb.ErrorCode_Success, assign.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, assign.GetStatus().GetErrorCode())
assert.EqualValues(t, collID, assign.CollectionID)
assert.EqualValues(t, partID, assign.PartitionID)
assert.EqualValues(t, channel0, assign.ChannelName)
@ -163,7 +163,7 @@ func TestAssignSegmentID(t *testing.T) {
assert.NoError(t, err)
assert.EqualValues(t, 1, len(resp.SegIDAssignments))
assign := resp.SegIDAssignments[0]
- assert.EqualValues(t, commonpb.ErrorCode_Success, assign.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, assign.GetStatus().GetErrorCode())
assert.EqualValues(t, collID, assign.CollectionID)
assert.EqualValues(t, partID, assign.PartitionID)
assert.EqualValues(t, channel0, assign.ChannelName)
@ -268,7 +268,7 @@ func TestFlush(t *testing.T) {
resp, err := svr.Flush(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
svr.meta.SetCurrentRows(segID, 1)
ids, err := svr.segmentManager.GetFlushableSegments(context.TODO(), "channel-1", expireTs)
@ -290,7 +290,7 @@ func TestFlush(t *testing.T) {
resp, err := svr.Flush(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.SegmentIDs))
// should not flush anything since this is a normal flush
svr.meta.SetCurrentRows(segID, 1)
@ -312,7 +312,7 @@ func TestFlush(t *testing.T) {
resp, err = svr.Flush(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.SegmentIDs))
ids, err = svr.segmentManager.GetFlushableSegments(context.TODO(), "channel-1", expireTs)
@ -356,7 +356,7 @@ func TestFlush(t *testing.T) {
//resp, err := svr.GetComponentStates(context.TODO())
//assert.NoError(t, err)
- //assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ //assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
//assert.EqualValues(t, commonpb.StateCode_Healthy, resp.State.StateCode)
//assert.EqualValues(t, 1, len(resp.SubcomponentStates))
//assert.EqualValues(t, commonpb.StateCode_Healthy, resp.SubcomponentStates[0].StateCode)
@ -367,7 +367,7 @@ func TestGetTimeTickChannel(t *testing.T) {
defer closeTestServer(t, svr)
resp, err := svr.GetTimeTickChannel(context.TODO())
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, Params.CommonCfg.DataCoordTimeTick.GetValue(), resp.Value)
}
@ -414,7 +414,7 @@ func TestGetSegmentStates(t *testing.T) {
SegmentIDs: []int64{test.id},
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.States))
if test.expected {
assert.EqualValues(t, test.expectedState, resp.States[0].State)
@ -470,7 +470,7 @@ func TestGetInsertBinlogPaths(t *testing.T) {
}
resp, err := svr.GetInsertBinlogPaths(svr.ctx, req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("with invalid segmentID", func(t *testing.T) {
@ -528,7 +528,7 @@ func TestGetCollectionStatistics(t *testing.T) {
}
resp, err := svr.GetCollectionStatistics(svr.ctx, req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("with closed server", func(t *testing.T) {
@ -554,7 +554,7 @@ func TestGetPartitionStatistics(t *testing.T) {
}
resp, err := svr.GetPartitionStatistics(context.Background(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("with closed server", func(t *testing.T) {
svr := newTestServer(t, nil)
@ -606,7 +606,7 @@ func TestGetSegmentInfo(t *testing.T) {
// Check that # of rows is corrected from 100 to 60.
assert.EqualValues(t, 60, resp.GetInfos()[0].GetNumOfRows())
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("with wrong segmentID", func(t *testing.T) {
svr := newTestServer(t, nil)
@ -624,7 +624,7 @@ func TestGetSegmentInfo(t *testing.T) {
}
resp, err := svr.GetSegmentInfo(svr.ctx, req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
})
t.Run("with closed server", func(t *testing.T) {
svr := newTestServer(t, nil)
@ -690,7 +690,7 @@ func TestGetSegmentInfo(t *testing.T) {
// no channel checkpoint
resp, err := svr.GetSegmentInfo(svr.ctx, req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 0, len(resp.GetChannelCheckpoint()))
// with nil insert channel of segment
@ -698,7 +698,7 @@ func TestGetSegmentInfo(t *testing.T) {
assert.NoError(t, err)
resp, err = svr.GetSegmentInfo(svr.ctx, req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 0, len(resp.GetChannelCheckpoint()))
// normal test
@ -709,7 +709,7 @@ func TestGetSegmentInfo(t *testing.T) {
assert.NoError(t, err)
resp, err = svr.GetSegmentInfo(svr.ctx, req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 1, len(resp.GetChannelCheckpoint()))
assert.Equal(t, mockPChannel, resp.ChannelCheckpoint[mockVChannel].ChannelName)
assert.Equal(t, Timestamp(1000), resp.ChannelCheckpoint[mockVChannel].Timestamp)
@ -1138,14 +1138,14 @@ func TestServer_ShowConfigurations(t *testing.T) {
svr.stateCode.Store(commonpb.StateCode_Initializing)
resp, err := svr.ShowConfigurations(svr.ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
// normal case
svr.stateCode.Store(stateSave)
resp, err = svr.ShowConfigurations(svr.ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 1, len(resp.Configuations))
assert.Equal(t, "datacoord.port", resp.Configuations[0].Key)
}
@ -1161,7 +1161,7 @@ func TestServer_GetMetrics(t *testing.T) {
svr.stateCode.Store(commonpb.StateCode_Initializing)
resp, err := svr.GetMetrics(svr.ctx, &milvuspb.GetMetricsRequest{})
assert.NoError(t, err)
- assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
svr.stateCode.Store(stateSave)
// failed to parse metric type
@ -1170,7 +1170,7 @@ func TestServer_GetMetrics(t *testing.T) {
Request: invalidRequest,
})
assert.NoError(t, err)
- assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
// unsupported metric type
unsupportedMetricType := "unsupported"
@ -1178,14 +1178,14 @@ func TestServer_GetMetrics(t *testing.T) {
assert.NoError(t, err)
resp, err = svr.GetMetrics(svr.ctx, req)
assert.NoError(t, err)
- assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
// normal case
req, err = metricsinfo.ConstructRequestByMetricType(metricsinfo.SystemInfoMetrics)
assert.NoError(t, err)
resp, err = svr.GetMetrics(svr.ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
log.Info("TestServer_GetMetrics",
zap.String("name", resp.ComponentName),
zap.String("response", resp.Response))
@ -2524,7 +2524,7 @@ func TestGetRecoveryInfo(t *testing.T) {
}
resp, err := svr.GetRecoveryInfo(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetBinlogs()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.Nil(t, resp.GetChannels()[0].SeekPosition)
@ -2648,7 +2648,7 @@ func TestGetRecoveryInfo(t *testing.T) {
}
resp, err := svr.GetRecoveryInfo(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.EqualValues(t, 0, len(resp.GetChannels()[0].GetUnflushedSegmentIds()))
assert.ElementsMatch(t, []int64{0, 1}, resp.GetChannels()[0].GetFlushedSegmentIds())
@ -2726,7 +2726,7 @@ func TestGetRecoveryInfo(t *testing.T) {
}
resp, err := svr.GetRecoveryInfo(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetBinlogs()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
@ -2825,7 +2825,7 @@ func TestGetRecoveryInfo(t *testing.T) {
}
resp, err := svr.GetRecoveryInfo(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.GetBinlogs()))
assert.EqualValues(t, 0, resp.GetBinlogs()[0].GetSegmentID())
assert.EqualValues(t, 1, len(resp.GetBinlogs()[0].GetFieldBinlogs()))
@ -2867,7 +2867,7 @@ func TestGetRecoveryInfo(t *testing.T) {
}
resp, err := svr.GetRecoveryInfo(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetBinlogs()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
@ -2910,7 +2910,7 @@ func TestGetRecoveryInfo(t *testing.T) {
}
resp, err := svr.GetRecoveryInfo(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetBinlogs()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
@ -2991,7 +2991,7 @@ func TestGetRecoveryInfo(t *testing.T) {
}
resp, err := svr.GetRecoveryInfo(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
assert.NotEqual(t, 0, resp.GetChannels()[0].GetSeekPosition().GetTimestamp())
assert.Len(t, resp.GetChannels()[0].GetDroppedSegmentIds(), 0)
@ -3094,7 +3094,7 @@ func TestManualCompaction(t *testing.T) {
Timetravel: 1,
})
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("test manual compaction failure", func(t *testing.T) {
@ -3113,7 +3113,7 @@ func TestManualCompaction(t *testing.T) {
Timetravel: 1,
})
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
})
t.Run("test manual compaction with closed server", func(t *testing.T) {
@ -3132,8 +3132,8 @@ func TestManualCompaction(t *testing.T) {
Timetravel: 1,
})
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
- assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.Status.Reason)
+ assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
+ assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.GetStatus().GetReason())
})
}
@ -3159,7 +3159,7 @@ func TestGetCompactionStateWithPlans(t *testing.T) {
CompactionID: 1,
})
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, commonpb.CompactionState_Executing, resp.State)
})
@ -3183,8 +3183,8 @@ func TestGetCompactionStateWithPlans(t *testing.T) {
CompactionID: 1,
})
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
- assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.Status.Reason)
+ assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
+ assert.Equal(t, msgDataCoordIsUnhealthy(paramtable.GetNodeID()), resp.GetStatus().GetReason())
})
}
@ -3822,7 +3822,7 @@ func TestDataCoordServer_SetSegmentState(t *testing.T) {
SegmentIDs: []int64{1000},
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.States))
assert.EqualValues(t, commonpb.SegmentState_Flushed, resp.States[0].State)
})
@ -3848,7 +3848,7 @@ func TestDataCoordServer_SetSegmentState(t *testing.T) {
SegmentIDs: []int64{1000},
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.States))
assert.EqualValues(t, commonpb.SegmentState_NotExist, resp.States[0].State)
})
@ -4581,7 +4581,7 @@ func TestDataNodeTtChannel(t *testing.T) {
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.SegIDAssignments))
assign := resp.SegIDAssignments[0]
@ -4596,7 +4596,7 @@ func TestDataNodeTtChannel(t *testing.T) {
CollectionID: 0,
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp2.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp2.GetStatus().GetErrorCode())
msgPack := msgstream.MsgPack{}
msg := genMsg(commonpb.MsgType_DataNodeTt, "ch-1", assign.ExpireTime)
@ -4652,7 +4652,7 @@ func TestDataNodeTtChannel(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 2, len(resp.SegIDAssignments))
var assign *datapb.SegmentIDAssignment
for _, segment := range resp.SegIDAssignments {
@ -4673,7 +4673,7 @@ func TestDataNodeTtChannel(t *testing.T) {
CollectionID: 0,
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp2.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp2.GetStatus().GetErrorCode())
msgPack := msgstream.MsgPack{}
msg := genMsg(commonpb.MsgType_DataNodeTt, "ch-1", assign.ExpireTime)
@ -4728,7 +4728,7 @@ func TestDataNodeTtChannel(t *testing.T) {
},
})
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.SegIDAssignments))
assignedSegmentID := resp.SegIDAssignments[0].SegID


@ -112,7 +112,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.Nil(t, resp.GetChannels()[0].SeekPosition)
@ -235,7 +235,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.EqualValues(t, 0, len(resp.GetChannels()[0].GetUnflushedSegmentIds()))
assert.ElementsMatch(t, []int64{0, 1}, resp.GetChannels()[0].GetFlushedSegmentIds())
@ -311,7 +311,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
@ -410,7 +410,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 1, len(resp.GetSegments()))
assert.EqualValues(t, 0, resp.GetSegments()[0].GetID())
assert.EqualValues(t, 1, len(resp.GetSegments()[0].GetBinlogs()))
@ -451,7 +451,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
@ -493,7 +493,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.EqualValues(t, 0, len(resp.GetSegments()))
assert.EqualValues(t, 1, len(resp.GetChannels()))
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
@ -573,7 +573,7 @@ func TestGetRecoveryInfoV2(t *testing.T) {
}
resp, err := svr.GetRecoveryInfoV2(context.TODO(), req)
assert.NoError(t, err)
- assert.EqualValues(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.NotNil(t, resp.GetChannels()[0].SeekPosition)
assert.NotEqual(t, 0, resp.GetChannels()[0].GetSeekPosition().GetTimestamp())
assert.Len(t, resp.GetChannels()[0].GetDroppedSegmentIds(), 0)


@ -565,7 +565,7 @@ func (node *DataNode) getPartitions(ctx context.Context, dbName string, collecti
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("failed to get partitions of collection", logFields...)
- return nil, errors.New(resp.Status.Reason)
+ return nil, errors.New(resp.GetStatus().GetReason())
}
partitionNames := resp.GetPartitionNames()
@ -708,7 +708,7 @@ func assignSegmentFunc(node *DataNode, req *datapb.ImportTaskRequest) importutil
return 0, "", fmt.Errorf("syncSegmentID Failed:%w", err)
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- return 0, "", fmt.Errorf("syncSegmentID Failed:%s", resp.Status.Reason)
+ return 0, "", fmt.Errorf("syncSegmentID Failed:%s", resp.GetStatus().GetReason())
}
if len(resp.SegIDAssignments) == 0 || resp.SegIDAssignments[0] == nil {
return 0, "", fmt.Errorf("syncSegmentID Failed: the collection was dropped")
@ -992,7 +992,7 @@ func reportImportFunc(node *DataNode) importutil.ReportFunc {
log.Error("fail to report import state to RootCoord", zap.Error(err))
return err
}
- if status != nil && status.ErrorCode != commonpb.ErrorCode_Success {
+ if status.GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(status.GetReason())
}
return nil
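
The first two hunks route the failure reason through the same nil-safe getters, and the last one drops the now-redundant status != nil guard, since the getter performs that check itself. A tiny hedged helper capturing the conversion these paths perform inline (statusToErr is a hypothetical name, not part of the codebase):

package datanode

import (
	"errors"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
)

// statusToErr is a hypothetical helper: nil for success, otherwise an
// error carrying the status reason. GetErrorCode and GetReason tolerate
// a nil status, which is exactly the class of panic this commit fixes.
func statusToErr(status *commonpb.Status) error {
	if status.GetErrorCode() == commonpb.ErrorCode_Success {
		return nil
	}
	return errors.New(status.GetReason())
}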


@ -162,13 +162,13 @@ func TestIndexNodeClient(t *testing.T) {
states, err := inc.GetComponentStates(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.StateCode_Healthy, states.State.StateCode)
- assert.Equal(t, commonpb.ErrorCode_Success, states.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, states.GetStatus().GetErrorCode())
})
t.Run("GetStatisticsChannel", func(t *testing.T) {
resp, err := inc.GetStatisticsChannel(ctx)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("CreatJob", func(t *testing.T) {
@ -185,7 +185,7 @@ func TestIndexNodeClient(t *testing.T) {
req := &indexpb.QueryJobsRequest{}
resp, err := inc.QueryJobs(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("DropJob", func(t *testing.T) {
@ -201,7 +201,7 @@ func TestIndexNodeClient(t *testing.T) {
}
resp, err := inc.ShowConfigurations(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("GetMetrics", func(t *testing.T) {
@ -209,14 +209,14 @@ func TestIndexNodeClient(t *testing.T) {
assert.NoError(t, err)
resp, err := inc.GetMetrics(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("GetJobStats", func(t *testing.T) {
req := &indexpb.GetJobStatsRequest{}
resp, err := inc.GetJobStats(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
err = ins.Stop()


@ -85,7 +85,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
log.Debug("IndexNode", zap.String("network address", Params.GetAddress()), zap.Int("network port: ", grpcPort))
lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
if err != nil {
- log.Warn("IndexNode", zap.String("GrpcServer:failed to listen", err.Error()))
+ log.Warn("IndexNode", zap.Error(err))
s.grpcErrChan <- err
return
}


@ -72,7 +72,7 @@ func TestIndexNodeServer(t *testing.T) {
req := &internalpb.GetStatisticsChannelRequest{}
resp, err := server.GetStatisticsChannel(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("CreateJob", func(t *testing.T) {
@ -91,7 +91,7 @@ func TestIndexNodeServer(t *testing.T) {
req := &indexpb.QueryJobsRequest{}
resp, err := server.QueryJobs(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("DropJobs", func(t *testing.T) {
@ -107,7 +107,7 @@ func TestIndexNodeServer(t *testing.T) {
}
resp, err := server.ShowConfigurations(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("GetMetrics", func(t *testing.T) {
@ -115,14 +115,14 @@ func TestIndexNodeServer(t *testing.T) {
assert.NoError(t, err)
resp, err := server.GetMetrics(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("GetTaskSlots", func(t *testing.T) {
req := &indexpb.GetJobStatsRequest{}
resp, err := server.GetJobStats(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
err = server.Stop()


@ -47,7 +47,7 @@ func (h *Handlers) checkDatabase(c *gin.Context, dbName string) bool {
c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
return false
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
+ c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
return false
}
for _, db := range response.DbNames {
@ -74,8 +74,8 @@ func (h *Handlers) describeCollection(c *gin.Context, dbName string, collectionN
c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
return nil, err
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
- return nil, errors.New(response.Status.Reason)
+ c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
+ return nil, errors.New(response.GetStatus().GetReason())
}
primaryField, ok := getPrimaryField(response.Schema)
if ok && primaryField.AutoID && !response.Schema.AutoID {
@ -95,8 +95,8 @@ func (h *Handlers) hasCollection(c *gin.Context, dbName string, collectionName s
c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
return false, err
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
- return false, errors.New(response.Status.Reason)
+ c.AbortWithStatusJSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
+ return false, errors.New(response.GetStatus().GetReason())
} else {
return response.Value, nil
}
@ -129,7 +129,7 @@ func (h *Handlers) listCollections(c *gin.Context) {
if err != nil {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
+ c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
} else {
var collections []string
if response.CollectionNames != nil {
@ -261,9 +261,9 @@ func (h *Handlers) getCollectionDetails(c *gin.Context) {
})
collLoadState := ""
if stateErr != nil {
- log.Warn("get collection load state fail", zap.String("collection", collectionName), zap.String("err", stateErr.Error()))
+ log.Warn("get collection load state fail", zap.Error(stateErr))
} else if stateResp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- log.Warn("get collection load state fail", zap.String("collection", collectionName), zap.String("err", stateResp.Status.Reason))
+ log.Warn("get collection load state fail", zap.String("collection", collectionName), zap.String("err", stateResp.GetStatus().GetReason()))
} else {
collLoadState = stateResp.State.String()
}
@ -282,10 +282,10 @@ func (h *Handlers) getCollectionDetails(c *gin.Context) {
var indexDesc []gin.H
if indexErr != nil {
indexDesc = []gin.H{}
- log.Warn("get indexes description fail", zap.String("collection", collectionName), zap.String("vectorField", vectorField), zap.String("err", indexErr.Error()))
+ log.Warn("get indexes description fail", zap.Error(indexErr))
} else if indexResp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
indexDesc = []gin.H{}
- log.Warn("get indexes description fail", zap.String("collection", collectionName), zap.String("vectorField", vectorField), zap.String("err", indexResp.Status.Reason))
+ log.Warn("get indexes description fail", zap.String("collection", collectionName), zap.String("vectorField", vectorField), zap.String("err", indexResp.GetStatus().GetReason()))
} else {
indexDesc = printIndexes(indexResp.IndexDescriptions)
}
@ -382,7 +382,7 @@ func (h *Handlers) query(c *gin.Context) {
if err != nil {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
+ c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
} else {
outputData, err := buildQueryResp(int64(0), response.OutputFields, response.FieldsData, nil, nil)
if err != nil {
@ -436,7 +436,7 @@ func (h *Handlers) get(c *gin.Context) {
if err != nil {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
+ c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
} else {
outputData, err := buildQueryResp(int64(0), response.OutputFields, response.FieldsData, nil, nil)
if err != nil {
@ -488,7 +488,7 @@ func (h *Handlers) delete(c *gin.Context) {
if err != nil {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
+ c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
} else {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: http.StatusOK, HTTPReturnData: gin.H{}})
}
@ -549,7 +549,7 @@ func (h *Handlers) insert(c *gin.Context) {
if err != nil {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
+ c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
} else {
switch response.IDs.GetIdField().(type) {
case *schemapb.IDs_IntId:
@ -608,7 +608,7 @@ func (h *Handlers) search(c *gin.Context) {
if err != nil {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: Code(err), HTTPReturnMessage: err.Error()})
} else if response.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
- c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.Status.ErrorCode), HTTPReturnMessage: response.Status.Reason})
+ c.JSON(http.StatusOK, gin.H{HTTPReturnCode: int32(response.GetStatus().GetErrorCode()), HTTPReturnMessage: response.GetStatus().GetReason()})
} else {
if response.Results.TopK == int64(0) {
c.JSON(http.StatusOK, gin.H{HTTPReturnCode: http.StatusOK, HTTPReturnData: []interface{}{}})
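
Every handler hunk in this file repeats the same three-way branch: transport error, non-Success status, success. A sketch of that shape using the nil-safe getters; replyStatus is a hypothetical helper (the file inlines the branch), the HTTPReturn* key values are assumed here, and the real error branch derives its code via the package's Code(err) rather than a fixed constant:

package httpserver

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
)

// Assumed JSON keys; the handlers above use the package's HTTPReturn* constants.
const (
	httpReturnCode    = "code"
	httpReturnMessage = "message"
)

// replyStatus reports whether the caller may proceed; on failure it has
// already written the JSON error body.
func replyStatus(c *gin.Context, status *commonpb.Status, err error) bool {
	if err != nil {
		c.JSON(http.StatusOK, gin.H{httpReturnCode: http.StatusInternalServerError, httpReturnMessage: err.Error()})
		return false
	}
	// The getters are nil-safe: a nil status reads as Success and falls
	// through, instead of panicking on status.ErrorCode.
	if status.GetErrorCode() != commonpb.ErrorCode_Success {
		c.JSON(http.StatusOK, gin.H{
			httpReturnCode:    int32(status.GetErrorCode()),
			httpReturnMessage: status.GetReason(),
		})
		return false
	}
	return true
}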


@ -231,7 +231,7 @@ func (s *Server) startGrpcLoop(grpcPort int) {
log.Debug("network", zap.String("port", strconv.Itoa(grpcPort)))
lis, err := net.Listen("tcp", ":"+strconv.Itoa(grpcPort))
if err != nil {
- log.Debug("GrpcServer:failed to listen:", zap.String("error", err.Error()))
+ log.Debug("GrpcServer:failed to listen:", zap.Error(err))
s.grpcErrChan <- err
return
}


@ -172,7 +172,7 @@ func Test_NewServer(t *testing.T) {
)
resp, err := server.GetStatisticsChannel(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("GetTimeTickChannel", func(t *testing.T) {
@ -184,7 +184,7 @@ func Test_NewServer(t *testing.T) {
)
resp, err := server.GetTimeTickChannel(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("ShowCollections", func(t *testing.T) {
@ -196,7 +196,7 @@ func Test_NewServer(t *testing.T) {
resp, err := server.ShowCollections(ctx, nil)
assert.NoError(t, err)
assert.NotNil(t, resp)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("LoadCollection", func(t *testing.T) {
@ -252,7 +252,7 @@ func Test_NewServer(t *testing.T) {
mqc.EXPECT().GetSegmentInfo(mock.Anything, req).Return(&querypb.GetSegmentInfoResponse{Status: successStatus}, nil)
resp, err := server.GetSegmentInfo(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("LoadBalance", func(t *testing.T) {
@ -270,7 +270,7 @@ func Test_NewServer(t *testing.T) {
mqc.EXPECT().GetMetrics(mock.Anything, req).Return(&milvuspb.GetMetricsResponse{Status: successStatus}, nil)
resp, err := server.GetMetrics(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("CheckHealth", func(t *testing.T) {
@ -314,14 +314,14 @@ func Test_NewServer(t *testing.T) {
mqc.EXPECT().ListResourceGroups(mock.Anything, req).Return(&milvuspb.ListResourceGroupsResponse{Status: successStatus}, nil)
resp, err := server.ListResourceGroups(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("DescribeResourceGroup", func(t *testing.T) {
mqc.EXPECT().DescribeResourceGroup(mock.Anything, mock.Anything).Return(&querypb.DescribeResourceGroupResponse{Status: successStatus}, nil)
resp, err := server.DescribeResourceGroup(ctx, nil)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
err = server.Stop()


@ -117,7 +117,7 @@ func Test_NewServer(t *testing.T) {
req := &internalpb.GetStatisticsChannelRequest{}
resp, err := server.GetStatisticsChannel(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("GetTimeTickChannel", func(t *testing.T) {
@ -125,7 +125,7 @@ func Test_NewServer(t *testing.T) {
req := &internalpb.GetTimeTickChannelRequest{}
resp, err := server.GetTimeTickChannel(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("WatchDmChannels", func(t *testing.T) {
@ -182,7 +182,7 @@ func Test_NewServer(t *testing.T) {
req := &querypb.GetSegmentInfoRequest{}
resp, err := server.GetSegmentInfo(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("GetMetrics", func(t *testing.T) {
@ -193,7 +193,7 @@ func Test_NewServer(t *testing.T) {
}
resp, err := server.GetMetrics(ctx, req)
assert.NoError(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("Search", func(t *testing.T) {


@ -258,7 +258,7 @@ func (s *Server) startGrpcLoop(port int) {
log.Debug("start grpc ", zap.Int("port", port))
lis, err := net.Listen("tcp", ":"+strconv.Itoa(port))
if err != nil {
- log.Error("GrpcServer:failed to listen", zap.String("error", err.Error()))
+ log.Error("GrpcServer:failed to listen", zap.Error(err))
s.grpcErrChan <- err
return
}


@ -237,7 +237,7 @@ func TestRun(t *testing.T) {
t.Run("ListDatabases", func(t *testing.T) {
ret, err := svr.ListDatabases(ctx, nil)
assert.Nil(t, err)
- assert.Equal(t, commonpb.ErrorCode_Success, ret.Status.ErrorCode)
+ assert.Equal(t, commonpb.ErrorCode_Success, ret.GetStatus().GetErrorCode())
})
err = svr.Stop()
assert.NoError(t, err)


@ -26,6 +26,7 @@ import (
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
)
@ -40,7 +41,7 @@ func TestAbnormalIndexNode(t *testing.T) {
qresp, err := in.QueryJobs(ctx, &indexpb.QueryJobsRequest{})
assert.NoError(t, err)
- assert.Equal(t, qresp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
+ assert.Equal(t, qresp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
status, err = in.DropJobs(ctx, &indexpb.DropJobsRequest{})
assert.NoError(t, err)
@ -48,15 +49,15 @@ func TestAbnormalIndexNode(t *testing.T) {
jobNumRsp, err := in.GetJobStats(ctx, &indexpb.GetJobStatsRequest{})
assert.NoError(t, err)
- assert.Equal(t, jobNumRsp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
+ assert.Equal(t, jobNumRsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
metricsResp, err := in.GetMetrics(ctx, &milvuspb.GetMetricsRequest{})
assert.NoError(t, err)
- assert.Equal(t, metricsResp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
+ assert.Equal(t, metricsResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
configurationResp, err := in.ShowConfigurations(ctx, &internalpb.ShowConfigurationsRequest{})
assert.NoError(t, err)
- assert.Equal(t, configurationResp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
+ assert.Equal(t, configurationResp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
}
func TestGetMetrics(t *testing.T) {
@ -69,7 +70,7 @@ func TestGetMetrics(t *testing.T) {
defer in.Stop()
resp, err := in.GetMetrics(ctx, metricReq)
assert.NoError(t, err)
- assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success)
+ assert.True(t, merr.Ok(resp.GetStatus()))
t.Logf("Component: %s, Metrics: %s", resp.ComponentName, resp.Response)
}
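
assert.True(t, merr.Ok(resp.GetStatus())) replaces the manual ErrorCode comparison, which is why the merr import was added above. A sketch of what such a predicate amounts to, assuming only the legacy ErrorCode field (the real merr.Ok in pkg/util/merr may also consult the newer error-code field):

package merr

import "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"

// Ok reports whether a status represents success. The generated getter
// makes it safe to call with a nil status.
func Ok(status *commonpb.Status) bool {
	return status.GetErrorCode() == commonpb.ErrorCode_Success
}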
@ -86,15 +87,15 @@ func TestGetMetricsError(t *testing.T) {
}
resp, err := in.GetMetrics(ctx, errReq)
assert.NoError(t, err)
- assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
+ assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
unsupportedReq := &milvuspb.GetMetricsRequest{
Request: `{"metric_type": "application_info"}`,
}
resp, err = in.GetMetrics(ctx, unsupportedReq)
assert.NoError(t, err)
- assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_UnexpectedError)
- assert.Equal(t, resp.Status.Reason, metricsinfo.MsgUnimplementedMetric)
+ assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
+ assert.Equal(t, resp.GetStatus().GetReason(), metricsinfo.MsgUnimplementedMetric)
}
func TestMockFieldData(t *testing.T) {


@ -404,20 +404,20 @@ import (
// t.Run("GetComponentStates", func(t *testing.T) {
// resp, err := in.GetComponentStates(ctx)
// assert.NoError(t, err)
- // assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ // assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
// assert.Equal(t, commonpb.StateCode_Healthy, resp.State.StateCode)
// })
//
// t.Run("GetTimeTickChannel", func(t *testing.T) {
// resp, err := in.GetTimeTickChannel(ctx)
// assert.NoError(t, err)
- // assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ // assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
// })
//
// t.Run("GetStatisticsChannel", func(t *testing.T) {
// resp, err := in.GetStatisticsChannel(ctx)
// assert.NoError(t, err)
- // assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ // assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
// })
//
// t.Run("ShowConfigurations", func(t *testing.T) {
@ -432,7 +432,7 @@ import (
//
// resp, err := in.ShowConfigurations(ctx, req)
// assert.NoError(t, err)
- // assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
+ // assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
// assert.Equal(t, 1, len(resp.Configuations))
// assert.Equal(t, "indexnode.port", resp.Configuations[0].Key)
// })
@ -466,26 +466,26 @@ func TestComponentState(t *testing.T) {
in.SetEtcdClient(getEtcdClient())
state, err := in.GetComponentStates(ctx)
assert.NoError(t, err)
- assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
+ assert.Equal(t, state.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Abnormal)
assert.Nil(t, in.Init())
state, err = in.GetComponentStates(ctx)
assert.NoError(t, err)
- assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
+ assert.Equal(t, state.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Initializing)
assert.Nil(t, in.Start())
state, err = in.GetComponentStates(ctx)
assert.NoError(t, err)
- assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
+ assert.Equal(t, state.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Healthy)
assert.Nil(t, in.Stop())
assert.Nil(t, in.Stop())
state, err = in.GetComponentStates(ctx)
assert.NoError(t, err)
- assert.Equal(t, state.Status.ErrorCode, commonpb.ErrorCode_Success)
+ assert.Equal(t, state.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.Equal(t, state.State.StateCode, commonpb.StateCode_Abnormal)
}
@ -500,7 +500,7 @@ func TestGetTimeTickChannel(t *testing.T) {
in := NewIndexNode(ctx, factory)
ret, err := in.GetTimeTickChannel(ctx)
assert.NoError(t, err)
- assert.Equal(t, ret.Status.ErrorCode, commonpb.ErrorCode_Success)
+ assert.Equal(t, ret.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
}
func TestGetStatisticChannel(t *testing.T) {
@ -515,7 +515,7 @@ func TestGetStatisticChannel(t *testing.T) {
ret, err := in.GetStatisticsChannel(ctx)
assert.NoError(t, err)
- assert.Equal(t, ret.Status.ErrorCode, commonpb.ErrorCode_Success)
+ assert.Equal(t, ret.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
}
func TestIndexTaskWhenStoppingNode(t *testing.T) {


@ -106,7 +106,7 @@ func (node *Proxy) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringRe
// InvalidateCollectionMetaCache invalidate the meta cache of specific collection.
func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
ctx = logutil.WithModule(ctx, moduleName)
@ -148,7 +148,7 @@ func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *p
}
func (node *Proxy) CreateDatabase(ctx context.Context, request *milvuspb.CreateDatabaseRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -194,7 +194,7 @@ func (node *Proxy) CreateDatabase(ctx context.Context, request *milvuspb.CreateD
}
func (node *Proxy) DropDatabase(ctx context.Context, request *milvuspb.DropDatabaseRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -238,8 +238,8 @@ func (node *Proxy) DropDatabase(ctx context.Context, request *milvuspb.DropDatab
func (node *Proxy) ListDatabases(ctx context.Context, request *milvuspb.ListDatabasesRequest) (*milvuspb.ListDatabasesResponse, error) {
resp := &milvuspb.ListDatabasesResponse{}
- if !node.checkHealthy() {
- resp.Status = unhealthyStatus()
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
+ resp.Status = merr.Status(err)
return resp, nil
}
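
The Proxy hunks all trade the boolean node.checkHealthy() for an error returned by merr.CheckHealthy, folded into the response with merr.Status. A hedged sketch of that guard pattern, with the two merr signatures inferred from the call sites above rather than copied from pkg/util/merr:

package proxy

import (
	"fmt"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
)

// checkHealthy mirrors merr.CheckHealthy as used above: nil for a
// healthy state code, a descriptive error otherwise.
func checkHealthy(code commonpb.StateCode) error {
	if code == commonpb.StateCode_Healthy {
		return nil
	}
	return fmt.Errorf("service not ready: state is %s", code.String())
}

// statusFromErr mirrors merr.Status: fold an error into a
// commonpb.Status so an RPC can return (resp, nil) with the failure
// carried in the payload instead of in the transport error.
func statusFromErr(err error) *commonpb.Status {
	if err == nil {
		return &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
	}
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_UnexpectedError,
		Reason:    err.Error(),
	}
}

// Usage shape inside an RPC, as in the ListDatabases hunk above:
//
//	if err := checkHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
//		resp.Status = statusFromErr(err)
//		return resp, nil
//	}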
@ -286,7 +286,7 @@ func (node *Proxy) ListDatabases(ctx context.Context, request *milvuspb.ListData
// CreateCollection create a collection by the schema.
// TODO(dragondriver): add more detailed ut for ConsistencyLevel, should we support multiple consistency level in Proxy?
func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -355,7 +355,7 @@ func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.Creat
// DropCollection drop a collection.
func (node *Proxy) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -414,7 +414,7 @@ func (node *Proxy) DropCollection(ctx context.Context, request *milvuspb.DropCol
// HasCollection check if the specific collection exists in Milvus.
func (node *Proxy) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.BoolResponse{
Status: unhealthyStatus(),
}, nil
@ -481,7 +481,7 @@ func (node *Proxy) HasCollection(ctx context.Context, request *milvuspb.HasColle
// LoadCollection load a collection into query nodes.
func (node *Proxy) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -542,7 +542,7 @@ func (node *Proxy) LoadCollection(ctx context.Context, request *milvuspb.LoadCol
// ReleaseCollection remove the loaded collection from query nodes.
func (node *Proxy) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -607,7 +607,7 @@ func (node *Proxy) ReleaseCollection(ctx context.Context, request *milvuspb.Rele
// DescribeCollection get the meta information of specific collection, such as schema, created timestamp and etc.
func (node *Proxy) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.DescribeCollectionResponse{
Status: unhealthyStatus(),
}, nil
@ -679,7 +679,7 @@ func (node *Proxy) DescribeCollection(ctx context.Context, request *milvuspb.Des
// GetStatistics get the statistics, such as `num_rows`.
// WARNING: It is an experimental API
func (node *Proxy) GetStatistics(ctx context.Context, request *milvuspb.GetStatisticsRequest) (*milvuspb.GetStatisticsResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.GetStatisticsResponse{
Status: unhealthyStatus(),
}, nil
@ -759,7 +759,7 @@ func (node *Proxy) GetStatistics(ctx context.Context, request *milvuspb.GetStati
// GetCollectionStatistics get the collection statistics, such as `num_rows`.
func (node *Proxy) GetCollectionStatistics(ctx context.Context, request *milvuspb.GetCollectionStatisticsRequest) (*milvuspb.GetCollectionStatisticsResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.GetCollectionStatisticsResponse{
Status: unhealthyStatus(),
}, nil
@ -831,7 +831,7 @@ func (node *Proxy) GetCollectionStatistics(ctx context.Context, request *milvusp
// ShowCollections list all collections in Milvus.
func (node *Proxy) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.ShowCollectionsResponse{
Status: unhealthyStatus(),
}, nil
@ -897,7 +897,7 @@ func (node *Proxy) ShowCollections(ctx context.Context, request *milvuspb.ShowCo
}
func (node *Proxy) AlterCollection(ctx context.Context, request *milvuspb.AlterCollectionRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -961,7 +961,7 @@ func (node *Proxy) AlterCollection(ctx context.Context, request *milvuspb.AlterC
// CreatePartition create a partition in specific collection.
func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -1026,7 +1026,7 @@ func (node *Proxy) CreatePartition(ctx context.Context, request *milvuspb.Create
// DropPartition drop a partition in specific collection.
func (node *Proxy) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -1092,7 +1092,7 @@ func (node *Proxy) DropPartition(ctx context.Context, request *milvuspb.DropPart
// HasPartition check if partition exist.
func (node *Proxy) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.BoolResponse{
Status: unhealthyStatus(),
}, nil
@ -1170,7 +1170,7 @@ func (node *Proxy) HasPartition(ctx context.Context, request *milvuspb.HasPartit
// LoadPartitions load specific partitions into query nodes.
func (node *Proxy) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitionsRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -1239,7 +1239,7 @@ func (node *Proxy) LoadPartitions(ctx context.Context, request *milvuspb.LoadPar
// ReleasePartitions release specific partitions from query nodes.
func (node *Proxy) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionsRequest) (*commonpb.Status, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -1308,7 +1308,7 @@ func (node *Proxy) ReleasePartitions(ctx context.Context, request *milvuspb.Rele
// GetPartitionStatistics get the statistics of partition, such as num_rows.
func (node *Proxy) GetPartitionStatistics(ctx context.Context, request *milvuspb.GetPartitionStatisticsRequest) (*milvuspb.GetPartitionStatisticsResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.GetPartitionStatisticsResponse{
Status: unhealthyStatus(),
}, nil
@ -1382,7 +1382,7 @@ func (node *Proxy) GetPartitionStatistics(ctx context.Context, request *milvuspb
// ShowPartitions list all partitions in the specific collection.
func (node *Proxy) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.ShowPartitionsResponse{
Status: unhealthyStatus(),
}, nil
@ -1467,7 +1467,7 @@ func (node *Proxy) ShowPartitions(ctx context.Context, request *milvuspb.ShowPar
}
func (node *Proxy) GetLoadingProgress(ctx context.Context, request *milvuspb.GetLoadingProgressRequest) (*milvuspb.GetLoadingProgressResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.GetLoadingProgressResponse{Status: unhealthyStatus()}, nil
}
method := "GetLoadingProgress"
@ -1545,7 +1545,7 @@ func (node *Proxy) GetLoadingProgress(ctx context.Context, request *milvuspb.Get
}
func (node *Proxy) GetLoadState(ctx context.Context, request *milvuspb.GetLoadStateRequest) (*milvuspb.GetLoadStateResponse, error) {
- if !node.checkHealthy() {
+ if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.GetLoadStateResponse{Status: unhealthyStatus()}, nil
}
method := "GetLoadState"
@ -1645,7 +1645,7 @@ func (node *Proxy) GetLoadState(ctx context.Context, request *milvuspb.GetLoadSt
// CreateIndex create index for collection.
func (node *Proxy) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -1716,7 +1716,7 @@ func (node *Proxy) CreateIndex(ctx context.Context, request *milvuspb.CreateInde
// DescribeIndex get the meta information of index, such as index state, index id and etc.
func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.DescribeIndexResponse{
Status: unhealthyStatus(),
}, nil
@ -1800,7 +1800,7 @@ func (node *Proxy) DescribeIndex(ctx context.Context, request *milvuspb.Describe
// GetIndexStatistics get the information of index.
func (node *Proxy) GetIndexStatistics(ctx context.Context, request *milvuspb.GetIndexStatisticsRequest) (*milvuspb.GetIndexStatisticsResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
err := merr.WrapErrServiceNotReady(fmt.Sprintf("proxy %d is unhealthy", paramtable.GetNodeID()))
return &milvuspb.GetIndexStatisticsResponse{
Status: merr.Status(err),
@ -1878,7 +1878,7 @@ func (node *Proxy) GetIndexStatistics(ctx context.Context, request *milvuspb.Get
// DropIndex drop the index of collection.
func (node *Proxy) DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -1950,7 +1950,7 @@ func (node *Proxy) DropIndex(ctx context.Context, request *milvuspb.DropIndexReq
// IndexRows is the num of indexed rows. And TotalRows is the total number of segment rows.
// Deprecated: use DescribeIndex instead
func (node *Proxy) GetIndexBuildProgress(ctx context.Context, request *milvuspb.GetIndexBuildProgressRequest) (*milvuspb.GetIndexBuildProgressResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.GetIndexBuildProgressResponse{
Status: unhealthyStatus(),
}, nil
@ -2026,7 +2026,7 @@ func (node *Proxy) GetIndexBuildProgress(ctx context.Context, request *milvuspb.
// GetIndexState get the build-state of index.
// Deprecated: use DescribeIndex instead
func (node *Proxy) GetIndexState(ctx context.Context, request *milvuspb.GetIndexStateRequest) (*milvuspb.GetIndexStateResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.GetIndexStateResponse{
Status: unhealthyStatus(),
}, nil
@ -2105,7 +2105,7 @@ func (node *Proxy) Insert(ctx context.Context, request *milvuspb.InsertRequest)
ctx, sp := otel.Tracer(typeutil.ProxyRole).Start(ctx, "Proxy-Insert")
defer sp.End()
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.MutationResult{
Status: unhealthyStatus(),
}, nil
@ -2231,7 +2231,7 @@ func (node *Proxy) Delete(ctx context.Context, request *milvuspb.DeleteRequest)
strconv.FormatInt(paramtable.GetNodeID(), 10),
metrics.DeleteLabel, request.GetCollectionName()).Add(float64(proto.Size(request)))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.MutationResult{
Status: unhealthyStatus(),
}, nil
@ -2300,7 +2300,7 @@ func (node *Proxy) Upsert(ctx context.Context, request *milvuspb.UpsertRequest)
)
log.Debug("Start processing upsert request in Proxy")
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.MutationResult{
Status: unhealthyStatus(),
}, nil
@ -2382,7 +2382,7 @@ func (node *Proxy) Upsert(ctx context.Context, request *milvuspb.UpsertRequest)
if it.result.GetStatus().GetErrorCode() == commonpb.ErrorCode_Success {
it.result.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
}
return constructFailedResponse(err, it.result.Status.ErrorCode), nil
return constructFailedResponse(err, it.result.GetStatus().GetErrorCode()), nil
}
if it.result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
@ -2424,7 +2424,7 @@ func (node *Proxy) Search(ctx context.Context, request *milvuspb.SearchRequest)
rateCol.Add(internalpb.RateType_DQLSearch.String(), float64(request.GetNq()))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.SearchResults{
Status: unhealthyStatus(),
}, nil
@ -2537,7 +2537,7 @@ func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*
Reason: "",
},
}
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status.Reason = "proxy is not healthy"
return resp, nil
}
@ -2570,7 +2570,7 @@ func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.AbandonLabel).Inc()
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
@ -2589,7 +2589,7 @@ func (node *Proxy) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.FailLabel).Inc()
resp.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
@ -2616,7 +2616,7 @@ func (node *Proxy) Query(ctx context.Context, request *milvuspb.QueryRequest) (*
rateCol.Add(internalpb.RateType_DQLQuery.String(), 1)
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.QueryResults{
Status: unhealthyStatus(),
}, nil
@ -2721,7 +2721,7 @@ func (node *Proxy) Query(ctx context.Context, request *milvuspb.QueryRequest) (*
// CreateAlias create alias for collection, then you can search the collection with alias.
func (node *Proxy) CreateAlias(ctx context.Context, request *milvuspb.CreateAliasRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -2803,7 +2803,7 @@ func (node *Proxy) ListAliases(ctx context.Context, request *milvuspb.ListAliase
// DropAlias alter the alias of collection.
func (node *Proxy) DropAlias(ctx context.Context, request *milvuspb.DropAliasRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -2866,7 +2866,7 @@ func (node *Proxy) DropAlias(ctx context.Context, request *milvuspb.DropAliasReq
// AlterAlias alter alias of collection.
func (node *Proxy) AlterAlias(ctx context.Context, request *milvuspb.AlterAliasRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -2947,7 +2947,7 @@ func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest)
resp := &milvuspb.FlushAllResponse{
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UnexpectedError},
}
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status.Reason = "proxy is not healthy"
return resp, nil
}
@ -2956,10 +2956,10 @@ func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest)
hasError := func(status *commonpb.Status, err error) bool {
if err != nil {
resp.Status = merr.Status(err)
log.Warn("FlushAll failed", zap.String("err", err.Error()))
log.Warn("FlushAll failed", zap.Error(err))
return true
}
if status != nil && status.ErrorCode != commonpb.ErrorCode_Success {
if status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("FlushAll failed", zap.String("err", status.GetReason()))
resp.Status = status
return true
@ -3022,7 +3022,7 @@ func (node *Proxy) FlushAll(ctx context.Context, req *milvuspb.FlushAllRequest)
ts, err := node.tsoAllocator.AllocOne(ctx)
if err != nil {
log.Warn("FlushAll failed", zap.Error(err))
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
@ -3061,8 +3061,8 @@ func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.G
ErrorCode: commonpb.ErrorCode_UnexpectedError,
},
}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
method := "GetPersistentSegmentInfo"
@ -3107,15 +3107,16 @@ func (node *Proxy) GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.G
resp.Status.Reason = fmt.Errorf("dataCoord:GetSegmentInfo, err:%w", err).Error()
return resp, nil
}
err = merr.Error(infoResp.GetStatus())
if err != nil {
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method,
metrics.FailLabel).Inc()
resp.Status = merr.Status(err)
return resp, nil
}
log.Debug("GetPersistentSegmentInfo",
zap.Int("len(infos)", len(infoResp.Infos)),
zap.Any("status", infoResp.Status))
if infoResp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method,
metrics.FailLabel).Inc()
resp.Status.Reason = infoResp.Status.Reason
return resp, nil
}
persistentInfos := make([]*milvuspb.PersistentSegmentInfo, len(infoResp.Infos))
for i, info := range infoResp.Infos {
persistentInfos[i] = &milvuspb.PersistentSegmentInfo{
@ -3151,8 +3152,8 @@ func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQue
ErrorCode: commonpb.ErrorCode_UnexpectedError,
},
}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
@ -3164,7 +3165,7 @@ func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQue
collID, err := globalMetaCache.GetCollectionID(ctx, req.GetDbName(), req.CollectionName)
if err != nil {
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.FailLabel).Inc()
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
infoResp, err := node.queryCoord.GetSegmentInfo(ctx, &querypb.GetSegmentInfoRequest{
@ -3175,23 +3176,19 @@ func (node *Proxy) GetQuerySegmentInfo(ctx context.Context, req *milvuspb.GetQue
),
CollectionID: collID,
})
if err == nil {
err = merr.Error(infoResp.GetStatus())
}
if err != nil {
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.FailLabel).Inc()
log.Error("Failed to get segment info from QueryCoord",
zap.Error(err))
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
log.Debug("GetQuerySegmentInfo",
zap.Any("infos", infoResp.Infos),
zap.Any("status", infoResp.Status))
if infoResp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
metrics.ProxyFunctionCall.WithLabelValues(strconv.FormatInt(paramtable.GetNodeID(), 10), method, metrics.FailLabel).Inc()
log.Error("Failed to get segment info from QueryCoord",
zap.String("errMsg", infoResp.Status.Reason))
resp.Status.Reason = infoResp.Status.Reason
return resp, nil
}
queryInfos := make([]*milvuspb.QuerySegmentInfo, len(infoResp.Infos))
for i, info := range infoResp.Infos {
queryInfos[i] = &milvuspb.QuerySegmentInfo{
@ -3309,7 +3306,7 @@ func (node *Proxy) GetMetrics(ctx context.Context, req *milvuspb.GetMetricsReque
zap.Int64("nodeID", paramtable.GetNodeID()),
zap.String("req", req.Request))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
err := merr.WrapErrServiceNotReady(fmt.Sprintf("proxy %d is unhealthy", paramtable.GetNodeID()))
log.Warn("Proxy.GetMetrics failed",
zap.Int64("nodeID", paramtable.GetNodeID()),
@ -3382,7 +3379,7 @@ func (node *Proxy) GetProxyMetrics(ctx context.Context, req *milvuspb.GetMetrics
zap.Int64("nodeID", paramtable.GetNodeID()),
zap.String("req", req.Request))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
err := merr.WrapErrServiceNotReady(fmt.Sprintf("proxy %d is unhealthy", paramtable.GetNodeID()))
log.Warn("Proxy.GetProxyMetrics failed",
zap.Error(err))
@ -3450,7 +3447,7 @@ func (node *Proxy) LoadBalance(ctx context.Context, req *milvuspb.LoadBalanceReq
zap.Int64("proxy_id", paramtable.GetNodeID()),
zap.Any("req", req))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -3509,8 +3506,8 @@ func (node *Proxy) GetReplicas(ctx context.Context, req *milvuspb.GetReplicasReq
zap.Int64("collection", req.GetCollectionID()),
zap.Bool("with shard nodes", req.GetWithShardNodes()))
resp := &milvuspb.GetReplicasResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
@ -3544,8 +3541,8 @@ func (node *Proxy) GetCompactionState(ctx context.Context, req *milvuspb.GetComp
log.Debug("received GetCompactionState request")
resp := &milvuspb.GetCompactionStateResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
@ -3566,8 +3563,8 @@ func (node *Proxy) ManualCompaction(ctx context.Context, req *milvuspb.ManualCom
log.Info("received ManualCompaction request")
resp := &milvuspb.ManualCompactionResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
@ -3588,8 +3585,8 @@ func (node *Proxy) GetCompactionStateWithPlans(ctx context.Context, req *milvusp
log.Debug("received GetCompactionStateWithPlans request")
resp := &milvuspb.GetCompactionPlansResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
@ -3611,8 +3608,8 @@ func (node *Proxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStat
zap.Any("request", req))
var err error
resp := &milvuspb.GetFlushStateResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
log.Warn("unable to get flush state because of closed server")
return resp, nil
}
@ -3658,8 +3655,8 @@ func (node *Proxy) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushA
var err error
resp := &milvuspb.GetFlushAllStateResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
log.Warn("GetFlushAllState failed, closed server")
return resp, nil
}
@ -3667,7 +3664,7 @@ func (node *Proxy) GetFlushAllState(ctx context.Context, req *milvuspb.GetFlushA
resp, err = node.dataCoord.GetFlushAllState(ctx, req)
if err != nil {
resp.Status = merr.Status(err)
log.Warn("GetFlushAllState failed", zap.String("err", err.Error()))
log.Warn("GetFlushAllState failed", zap.Error(err))
return resp, nil
}
log.Debug("GetFlushAllState done", zap.Bool("flushed", resp.GetFlushed()))
@ -3707,8 +3704,8 @@ func (node *Proxy) Import(ctx context.Context, req *milvuspb.ImportRequest) (*mi
resp := &milvuspb.ImportResponse{
Status: merr.Status(nil),
}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
@ -3733,7 +3730,7 @@ func (node *Proxy) Import(ctx context.Context, req *milvuspb.ImportRequest) (*mi
log.Error("failed to execute bulk insert request",
zap.Error(err))
resp.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
@ -3752,8 +3749,8 @@ func (node *Proxy) GetImportState(ctx context.Context, req *milvuspb.GetImportSt
log.Debug("received get import state request",
zap.Int64("taskID", req.GetTask()))
resp := &milvuspb.GetImportStateResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
method := "GetImportState"
@ -3767,7 +3764,7 @@ func (node *Proxy) GetImportState(ctx context.Context, req *milvuspb.GetImportSt
log.Error("failed to execute get import state",
zap.Error(err))
resp.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
@ -3788,8 +3785,8 @@ func (node *Proxy) ListImportTasks(ctx context.Context, req *milvuspb.ListImport
log.Debug("received list import tasks request")
resp := &milvuspb.ListImportTasksResponse{}
if !node.checkHealthy() {
resp.Status = unhealthyStatus()
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp.Status = merr.Status(err)
return resp, nil
}
method := "ListImportTasks"
@ -3802,7 +3799,7 @@ func (node *Proxy) ListImportTasks(ctx context.Context, req *milvuspb.ListImport
log.Error("failed to execute list import tasks",
zap.Error(err))
resp.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
resp.Status.Reason = err.Error()
resp.Status = merr.Status(err)
return resp, nil
}
@ -3824,7 +3821,7 @@ func (node *Proxy) InvalidateCredentialCache(ctx context.Context, request *proxy
zap.String("username", request.Username))
log.Debug("received request to invalidate credential cache")
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -3847,7 +3844,7 @@ func (node *Proxy) UpdateCredentialCache(ctx context.Context, request *proxypb.U
zap.String("username", request.Username))
log.Debug("received request to update credential cache")
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -3872,7 +3869,7 @@ func (node *Proxy) CreateCredential(ctx context.Context, req *milvuspb.CreateCre
log.Debug("CreateCredential",
zap.String("role", typeutil.ProxyRole))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
// validate params
@ -3933,7 +3930,7 @@ func (node *Proxy) UpdateCredential(ctx context.Context, req *milvuspb.UpdateCre
log.Debug("UpdateCredential",
zap.String("role", typeutil.ProxyRole))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
rawOldPassword, err := crypto.Base64Decode(req.OldPassword)
@ -4012,7 +4009,7 @@ func (node *Proxy) DeleteCredential(ctx context.Context, req *milvuspb.DeleteCre
log.Debug("DeleteCredential",
zap.String("role", typeutil.ProxyRole))
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -4039,7 +4036,7 @@ func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUser
zap.String("role", typeutil.ProxyRole))
log.Debug("ListCredUsers")
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.ListCredUsersResponse{Status: unhealthyStatus()}, nil
}
rootCoordReq := &milvuspb.ListCredUsersRequest{
@ -4372,7 +4369,7 @@ func (node *Proxy) SetRates(ctx context.Context, request *proxypb.SetRatesReques
resp := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
}
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
resp = unhealthyStatus()
return resp, nil
}
@ -4388,7 +4385,7 @@ func (node *Proxy) SetRates(ctx context.Context, request *proxypb.SetRatesReques
}
func (node *Proxy) CheckHealth(ctx context.Context, request *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
reason := errorutil.UnHealthReason("proxy", node.session.ServerID, "proxy is unhealthy")
return &milvuspb.CheckHealthResponse{
Status: unhealthyStatus(),
@ -4468,7 +4465,7 @@ func (node *Proxy) RenameCollection(ctx context.Context, req *milvuspb.RenameCol
log.Info("received rename collection request")
var err error
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -4498,7 +4495,7 @@ func (node *Proxy) RenameCollection(ctx context.Context, req *milvuspb.RenameCol
}
func (node *Proxy) CreateResourceGroup(ctx context.Context, request *milvuspb.CreateResourceGroupRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -4566,7 +4563,7 @@ func getErrResponse(err error, method string) *commonpb.Status {
}
func (node *Proxy) DropResourceGroup(ctx context.Context, request *milvuspb.DropResourceGroupRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -4619,7 +4616,7 @@ func (node *Proxy) DropResourceGroup(ctx context.Context, request *milvuspb.Drop
}
func (node *Proxy) TransferNode(ctx context.Context, request *milvuspb.TransferNodeRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -4686,7 +4683,7 @@ func (node *Proxy) TransferNode(ctx context.Context, request *milvuspb.TransferN
}
func (node *Proxy) TransferReplica(ctx context.Context, request *milvuspb.TransferReplicaRequest) (*commonpb.Status, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return unhealthyStatus(), nil
}
@ -4753,7 +4750,7 @@ func (node *Proxy) TransferReplica(ctx context.Context, request *milvuspb.Transf
}
func (node *Proxy) ListResourceGroups(ctx context.Context, request *milvuspb.ListResourceGroupsRequest) (*milvuspb.ListResourceGroupsResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.ListResourceGroupsResponse{
Status: unhealthyStatus(),
}, nil
@ -4816,7 +4813,7 @@ func (node *Proxy) ListResourceGroups(ctx context.Context, request *milvuspb.Lis
}
func (node *Proxy) DescribeResourceGroup(ctx context.Context, request *milvuspb.DescribeResourceGroupRequest) (*milvuspb.DescribeResourceGroupResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.DescribeResourceGroupResponse{
Status: unhealthyStatus(),
}, nil
@ -4897,7 +4894,7 @@ func (node *Proxy) DescribeSegmentIndexData(ctx context.Context, request *federp
}
func (node *Proxy) Connect(ctx context.Context, request *milvuspb.ConnectRequest) (*milvuspb.ConnectResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.ConnectResponse{Status: unhealthyStatus()}, nil
}
@ -4912,6 +4909,9 @@ func (node *Proxy) Connect(ctx context.Context, request *milvuspb.ConnectRequest
commonpbutil.WithMsgType(commonpb.MsgType_ListDatabases),
),
})
if err == nil {
err = merr.Error(resp.GetStatus())
}
if err != nil {
log.Info("connect failed, failed to list databases", zap.Error(err))
@ -4920,15 +4920,6 @@ func (node *Proxy) Connect(ctx context.Context, request *milvuspb.ConnectRequest
}, nil
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
log.Info("connect failed, failed to list databases",
zap.String("code", resp.GetStatus().GetErrorCode().String()),
zap.String("reason", resp.GetStatus().GetReason()))
return &milvuspb.ConnectResponse{
Status: proto.Clone(resp.GetStatus()).(*commonpb.Status),
}, nil
}
if !funcutil.SliceContain(resp.GetDbNames(), db) {
log.Info("connect failed, target database not exist")
return &milvuspb.ConnectResponse{
@ -4966,7 +4957,7 @@ func (node *Proxy) Connect(ctx context.Context, request *milvuspb.ConnectRequest
}
func (node *Proxy) ListClientInfos(ctx context.Context, req *proxypb.ListClientInfosRequest) (*proxypb.ListClientInfosResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &proxypb.ListClientInfosResponse{Status: unhealthyStatus()}, nil
}
@ -4979,7 +4970,7 @@ func (node *Proxy) ListClientInfos(ctx context.Context, req *proxypb.ListClientI
}
func (node *Proxy) AllocTimestamp(ctx context.Context, req *milvuspb.AllocTimestampRequest) (*milvuspb.AllocTimestampResponse, error) {
if !node.checkHealthy() {
if err := merr.CheckHealthy(node.stateCode.Load().(commonpb.StateCode)); err != nil {
return &milvuspb.AllocTimestampResponse{Status: unhealthyStatus()}, nil
}
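
The hunks above all make the same mechanical change: the boolean checkHealthy() guard paired with a canned unhealthyStatus() gives way to merr.CheckHealthy, which returns a typed error that merr.Status converts into a well-formed, non-nil *commonpb.Status. A minimal sketch of the new idiom, with guard as an illustrative helper (not part of this change) and import paths assuming the go-api/v2 layout used elsewhere in this tree:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/pkg/util/merr"
)

// guard sketches the replacement pattern: merr.CheckHealthy yields a typed
// service-not-ready error for any non-healthy state, and merr.Status turns
// that error into a non-nil status that callers can inspect safely through
// the generated getters.
func guard(code commonpb.StateCode) *commonpb.Status {
	if err := merr.CheckHealthy(code); err != nil {
		return merr.Status(err)
	}
	return merr.Status(nil) // success status
}

func main() {
	fmt.Println(guard(commonpb.StateCode_Abnormal).GetReason())   // service-not-ready reason
	fmt.Println(guard(commonpb.StateCode_Healthy).GetErrorCode()) // Success
}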

View File

@ -35,6 +35,7 @@ import (
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/pkg/log"
"github.com/milvus-io/milvus/pkg/util/merr"
"github.com/milvus-io/milvus/pkg/util/paramtable"
)
@ -279,7 +280,7 @@ func TestProxy_ResourceGroup(t *testing.T) {
qc.EXPECT().ListResourceGroups(mock.Anything, mock.Anything).Return(&milvuspb.ListResourceGroupsResponse{Status: successStatus}, nil)
resp, err := node.ListResourceGroups(ctx, &milvuspb.ListResourceGroupsRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.True(t, merr.Ok(resp.GetStatus()))
})
t.Run("describe resource group", func(t *testing.T) {
@ -298,7 +299,7 @@ func TestProxy_ResourceGroup(t *testing.T) {
ResourceGroup: "rg",
})
assert.NoError(t, err)
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.True(t, merr.Ok(resp.GetStatus()))
})
}
@ -421,7 +422,7 @@ func TestProxy_FlushAll_DbCollection(t *testing.T) {
resp, err := node.FlushAll(ctx, test.FlushRequest)
assert.NoError(t, err)
if test.ExpectedSuccess {
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.True(t, merr.Ok(resp.GetStatus()))
} else {
assert.NotEqual(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
}
@ -475,7 +476,7 @@ func TestProxy_FlushAll(t *testing.T) {
t.Run("FlushAll", func(t *testing.T) {
resp, err := node.FlushAll(ctx, &milvuspb.FlushAllRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.True(t, merr.Ok(resp.GetStatus()))
})
t.Run("FlushAll failed, server is abnormal", func(t *testing.T) {
@ -568,14 +569,14 @@ func TestProxy_GetFlushAllState(t *testing.T) {
t.Run("GetFlushAllState success", func(t *testing.T) {
resp, err := node.GetFlushAllState(ctx, &milvuspb.GetFlushAllStateRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.True(t, merr.Ok(resp.GetStatus()))
})
t.Run("GetFlushAllState failed, server is abnormal", func(t *testing.T) {
node.stateCode.Store(commonpb.StateCode_Abnormal)
resp, err := node.GetFlushAllState(ctx, &milvuspb.GetFlushAllStateRequest{})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
assert.ErrorIs(t, merr.Error(resp.GetStatus()), merr.ErrServiceNotReady)
node.stateCode.Store(commonpb.StateCode_Healthy)
})
@ -617,7 +618,7 @@ func TestProxy_GetReplicas(t *testing.T) {
CollectionID: 1000,
})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
assert.True(t, merr.Ok(resp.GetStatus()))
})
t.Run("proxy_not_healthy", func(t *testing.T) {
@ -626,7 +627,7 @@ func TestProxy_GetReplicas(t *testing.T) {
CollectionID: 1000,
})
assert.NoError(t, err)
assert.Equal(t, resp.GetStatus().GetErrorCode(), commonpb.ErrorCode_UnexpectedError)
assert.ErrorIs(t, merr.Error(resp.GetStatus()), merr.ErrServiceNotReady)
node.stateCode.Store(commonpb.StateCode_Healthy)
})
@ -926,7 +927,7 @@ func TestProxyListDatabase(t *testing.T) {
ctx := context.Background()
resp, err := node.ListDatabases(ctx, &milvuspb.ListDatabasesRequest{})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
assert.ErrorIs(t, merr.Error(resp.GetStatus()), merr.ErrServiceNotReady)
})
factory := dependency.NewDefaultFactory(true)
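
The test migrations above follow two rules: success paths assert merr.Ok(resp.GetStatus()) rather than comparing raw error codes, and unhealthy paths recover a typed error with merr.Error and match it via assert.ErrorIs. A sketch of both styles as hypothetical helpers (names are illustrative, placed in the proxy package only for illustration):

package proxy

import (
	"testing"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/pkg/util/merr"
	"github.com/stretchr/testify/assert"
)

// assertOK checks the happy path: the status decodes as a success.
func assertOK(t *testing.T, st *commonpb.Status) {
	assert.True(t, merr.Ok(st))
}

// assertNotReady checks the unhealthy path: the status round-trips back
// into an error matching merr.ErrServiceNotReady.
func assertNotReady(t *testing.T, st *commonpb.Status) {
	assert.ErrorIs(t, merr.Error(st), merr.ErrServiceNotReady)
}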

View File

@ -622,7 +622,7 @@ func (m *MetaCache) showPartitions(ctx context.Context, dbName string, collectio
return nil, err
}
if partitions.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return nil, fmt.Errorf("%s", partitions.Status.Reason)
return nil, fmt.Errorf("%s", partitions.GetStatus().GetReason())
}
if len(partitions.PartitionIDs) != len(partitions.PartitionNames) {
@ -813,15 +813,15 @@ func (m *MetaCache) GetShards(ctx context.Context, withCache bool, database, col
}
// do not retry unless got NoReplicaAvailable from querycoord
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_NoReplicaAvailable {
return retry.Unrecoverable(fmt.Errorf("fail to get shard leaders from QueryCoord: %s", resp.Status.Reason))
return retry.Unrecoverable(fmt.Errorf("fail to get shard leaders from QueryCoord: %s", resp.GetStatus().GetReason()))
}
return fmt.Errorf("fail to get shard leaders from QueryCoord: %s", resp.Status.Reason)
return fmt.Errorf("fail to get shard leaders from QueryCoord: %s", resp.GetStatus().GetReason())
})
if err != nil {
return nil, err
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return nil, fmt.Errorf("fail to get shard leaders from QueryCoord: %s", resp.Status.Reason)
return nil, fmt.Errorf("fail to get shard leaders from QueryCoord: %s", resp.GetStatus().GetReason())
}
shards := parseShardLeaderList2QueryNode(resp.GetShards())

View File

@ -334,7 +334,7 @@ func (node *Proxy) sendChannelsTimeTickLoop() {
log.Warn("sendChannelsTimeTickLoop.UpdateChannelTimeTick", zap.Error(err))
continue
}
if status.ErrorCode != 0 {
if status.GetErrorCode() != 0 {
log.Warn("sendChannelsTimeTickLoop.UpdateChannelTimeTick",
zap.Any("ErrorCode", status.ErrorCode),
zap.Any("Reason", status.Reason))

File diff suppressed because it is too large

View File

@ -325,7 +325,7 @@ func (sa *segIDAssigner) syncSegments() (bool, error) {
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return false, fmt.Errorf("syncSegmentID Failed:%s", resp.Status.Reason)
return false, fmt.Errorf("syncSegmentID Failed:%s", resp.GetStatus().GetReason())
}
var errMsg string
@ -333,8 +333,8 @@ func (sa *segIDAssigner) syncSegments() (bool, error) {
success := true
for _, segAssign := range resp.SegIDAssignments {
if segAssign.Status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("proxy", zap.String("SyncSegment Error", segAssign.Status.Reason))
errMsg += segAssign.Status.Reason
log.Warn("proxy", zap.String("SyncSegment Error", segAssign.GetStatus().GetReason()))
errMsg += segAssign.GetStatus().GetReason()
errMsg += "\n"
success = false
continue

View File

@ -435,7 +435,7 @@ func (hct *hasCollectionTask) Execute(ctx context.Context) error {
return errors.New("has collection resp is nil")
}
if hct.result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(hct.result.Status.Reason)
return errors.New(hct.result.GetStatus().GetReason())
}
return nil
}
@ -529,7 +529,7 @@ func (dct *describeCollectionTask) Execute(ctx context.Context) error {
err := merr.Error(dct.result.GetStatus())
if errors.Is(err, merr.ErrCollectionNotFound) {
dct.result.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
dct.result.Status.Reason = "can't find collection " + dct.result.Status.Reason
dct.result.Status.Reason = "can't find collection " + dct.result.GetStatus().GetReason()
}
} else {
dct.result.Schema.Name = result.Schema.Name
@ -646,7 +646,7 @@ func (sct *showCollectionsTask) Execute(ctx context.Context) error {
}
if respFromRootCoord.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(respFromRootCoord.Status.Reason)
return errors.New(respFromRootCoord.GetStatus().GetReason())
}
if sct.GetType() == milvuspb.ShowType_InMemory {
@ -685,7 +685,7 @@ func (sct *showCollectionsTask) Execute(ctx context.Context) error {
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
// update collectionID to collection name, and return new error info to sdk
newErrorReason := resp.Status.Reason
newErrorReason := resp.GetStatus().GetReason()
for _, collectionID := range collectionIDs {
newErrorReason = ReplaceID2Name(newErrorReason, collectionID, IDs2Names[collectionID])
}
@ -1060,7 +1060,7 @@ func (hpt *hasPartitionTask) Execute(ctx context.Context) (err error) {
return err
}
if hpt.result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(hpt.result.Status.Reason)
return errors.New(hpt.result.GetStatus().GetReason())
}
return err
}
@ -1145,7 +1145,7 @@ func (spt *showPartitionsTask) Execute(ctx context.Context) error {
}
if respFromRootCoord.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(respFromRootCoord.Status.Reason)
return errors.New(respFromRootCoord.GetStatus().GetReason())
}
if spt.GetType() == milvuspb.ShowType_InMemory {
@ -1189,7 +1189,7 @@ func (spt *showPartitionsTask) Execute(ctx context.Context) error {
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(resp.Status.Reason)
return errors.New(resp.GetStatus().GetReason())
}
spt.result = &milvuspb.ShowPartitionsResponse{
@ -1305,7 +1305,7 @@ func (ft *flushTask) Execute(ctx context.Context) error {
return fmt.Errorf("failed to call flush to data coordinator: %s", err.Error())
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(resp.Status.Reason)
return errors.New(resp.GetStatus().GetReason())
}
coll2Segments[collName] = &schemapb.LongArray{Data: resp.GetSegmentIDs()}
flushColl2Segments[collName] = &schemapb.LongArray{Data: resp.GetFlushSegmentIDs()}
@ -1421,7 +1421,7 @@ func (lct *loadCollectionTask) Execute(ctx context.Context) (err error) {
return err
}
if indexResponse.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(indexResponse.Status.Reason)
return errors.New(indexResponse.GetStatus().GetReason())
}
hasVecIndex := false
@ -1649,7 +1649,7 @@ func (lpt *loadPartitionsTask) Execute(ctx context.Context) error {
return err
}
if indexResponse.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(indexResponse.Status.Reason)
return errors.New(indexResponse.GetStatus().GetReason())
}
hasVecIndex := false

View File

@ -496,7 +496,7 @@ func (dit *describeIndexTask) Execute(ctx context.Context) error {
dit.result = &milvuspb.DescribeIndexResponse{}
dit.result.Status = resp.GetStatus()
if dit.result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(dit.result.Status.Reason)
return errors.New(dit.result.GetStatus().GetReason())
}
for _, indexInfo := range resp.IndexInfos {
field, err := schemaHelper.GetFieldFromID(indexInfo.FieldID)
@ -615,7 +615,7 @@ func (dit *getIndexStatisticsTask) Execute(ctx context.Context) error {
dit.result = &milvuspb.GetIndexStatisticsResponse{}
dit.result.Status = resp.GetStatus()
if dit.result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(dit.result.Status.Reason)
return errors.New(dit.result.GetStatus().GetReason())
}
for _, indexInfo := range resp.IndexInfos {
field, err := schemaHelper.GetFieldFromID(indexInfo.FieldID)

View File

@ -69,7 +69,7 @@ func TestSearchTask_PostExecute(t *testing.T) {
err := qt.PostExecute(context.TODO())
assert.NoError(t, err)
assert.Equal(t, qt.result.Status.ErrorCode, commonpb.ErrorCode_Success)
assert.Equal(t, qt.result.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
})
}

View File

@ -242,7 +242,7 @@ func (g *getStatisticsTask) getStatisticsFromDataCoord(ctx context.Context) erro
return err
}
if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(result.Status.Reason)
return merr.Error(result.GetStatus())
}
if g.resultBuf == nil {
g.resultBuf = typeutil.NewConcurrentSet[*internalpb.GetStatisticsResponse]()
@ -463,7 +463,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return err
// }
// if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
// return errors.New(result.Status.Reason)
// return merr.Error(result.GetStatus())
// }
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
// Status: merr.Status(nil),
@ -535,7 +535,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return err
// }
// if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
// return errors.New(result.Status.Reason)
// return merr.Error(result.GetStatus())
// }
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
// Status: merr.Status(nil),
@ -558,7 +558,7 @@ func reduceStatisticResponse(results []map[string]string) ([]*commonpb.KeyValueP
// return err
// }
// if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
// return errors.New(result.Status.Reason)
// return merr.Error(result.GetStatus())
// }
// g.toReduceResults = append(g.toReduceResults, &internalpb.GetStatisticsResponse{
// Status: merr.Status(nil),
@ -657,7 +657,7 @@ func (g *getCollectionStatisticsTask) Execute(ctx context.Context) error {
return err
}
if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(result.Status.Reason)
return merr.Error(result.GetStatus())
}
g.result = &milvuspb.GetCollectionStatisticsResponse{
Status: merr.Status(nil),
@ -747,7 +747,7 @@ func (g *getPartitionStatisticsTask) Execute(ctx context.Context) error {
return errors.New("get partition statistics resp is nil")
}
if result.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(result.Status.Reason)
return merr.Error(result.GetStatus())
}
g.result = &milvuspb.GetPartitionStatisticsResponse{
Status: merr.Status(nil),

View File

@ -1030,7 +1030,7 @@ func TestDescribeCollectionTask(t *testing.T) {
assert.NoError(t, err)
err = task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.result.GetStatus().GetErrorCode())
}
func TestDescribeCollectionTask_ShardsNum1(t *testing.T) {
@ -1092,7 +1092,7 @@ func TestDescribeCollectionTask_ShardsNum1(t *testing.T) {
err = task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.GetStatus().GetErrorCode())
assert.Equal(t, shardsNum, task.result.ShardsNum)
assert.Equal(t, collectionName, task.result.GetCollectionName())
}
@ -1156,7 +1156,7 @@ func TestDescribeCollectionTask_EnableDynamicSchema(t *testing.T) {
err = task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.GetStatus().GetErrorCode())
assert.Equal(t, shardsNum, task.result.ShardsNum)
assert.Equal(t, collectionName, task.result.GetCollectionName())
assert.Equal(t, 2, len(task.result.Schema.Fields))
@ -1222,7 +1222,7 @@ func TestDescribeCollectionTask_ShardsNum2(t *testing.T) {
err = task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.GetStatus().GetErrorCode())
assert.Equal(t, common.DefaultShardsNum, task.result.ShardsNum)
assert.Equal(t, collectionName, task.result.GetCollectionName())
rc.Stop()
@ -2970,7 +2970,7 @@ func TestListResourceGroupsTask(t *testing.T) {
err := task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.GetStatus().GetErrorCode())
groups := task.result.GetResourceGroups()
assert.Contains(t, groups, meta.DefaultResourceGroupName)
assert.Contains(t, groups, "rg")
@ -3023,7 +3023,7 @@ func TestDescribeResourceGroupTask(t *testing.T) {
err := task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, task.result.GetStatus().GetErrorCode())
groupInfo := task.result.GetResourceGroup()
outgoingNodeNum := groupInfo.GetNumOutgoingNode()
incomingNodeNum := groupInfo.GetNumIncomingNode()
@ -3071,7 +3071,7 @@ func TestDescribeResourceGroupTaskFailed(t *testing.T) {
err := task.Execute(ctx)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.result.GetStatus().GetErrorCode())
qc.ExpectedCalls = nil
qc.EXPECT().Stop().Return(nil)

View File

@ -67,7 +67,7 @@ func (ta *timestampAllocator) alloc(ctx context.Context, count uint32) ([]Timest
return nil, fmt.Errorf("syncTimestamp Failed:%w", err)
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return nil, fmt.Errorf("syncTimeStamp Failed:%s", resp.Status.Reason)
return nil, fmt.Errorf("syncTimeStamp Failed:%s", resp.GetStatus().GetReason())
}
if resp == nil {
return nil, fmt.Errorf("empty AllocTimestampResponse")

View File

@ -1012,7 +1012,7 @@ func isCollectionLoaded(ctx context.Context, qc types.QueryCoord, collID int64)
return false, err
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return false, errors.New(resp.Status.Reason)
return false, errors.New(resp.GetStatus().GetReason())
}
for _, loadedCollID := range resp.GetCollectionIDs() {
@ -1033,7 +1033,7 @@ func isPartitionLoaded(ctx context.Context, qc types.QueryCoord, collID int64, p
return false, err
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return false, errors.New(resp.Status.Reason)
return false, errors.New(resp.GetStatus().GetReason())
}
for _, loadedPartID := range resp.GetPartitionIDs() {
@ -1212,7 +1212,7 @@ func getCollectionProgress(
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
err = merr.Error(resp.GetStatus())
log.Warn("fail to show collections", zap.Int64("collection_id", collectionID),
zap.String("reason", resp.Status.Reason))
zap.String("reason", resp.GetStatus().GetReason()))
return
}
@ -1285,7 +1285,7 @@ func getPartitionProgress(
log.Warn("fail to show partitions",
zap.String("collection_name", collectionName),
zap.Strings("partition_names", partitionNames),
zap.String("reason", resp.Status.Reason))
zap.String("reason", resp.GetStatus().GetReason()))
return
}

View File

@ -1647,7 +1647,7 @@ func Test_UpsertTaskCheckPrimaryFieldData(t *testing.T) {
case4.schema.Fields[0].IsPrimaryKey = true
case4.schema.Fields[0].AutoID = true
_, err = checkPrimaryFieldData(case4.schema, case4.result, case4.insertMsg, false)
assert.Equal(t, commonpb.ErrorCode_UpsertAutoIDTrue, case4.result.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_UpsertAutoIDTrue, case4.result.GetStatus().GetErrorCode())
assert.NotEqual(t, nil, err)
// primary field data is nil, GetPrimaryFieldData fail

View File

@ -192,19 +192,32 @@ func (broker *CoordinatorBroker) GetIndexInfo(ctx context.Context, collectionID
ctx, cancel := context.WithTimeout(ctx, paramtable.Get().QueryCoordCfg.BrokerTimeout.GetAsDuration(time.Millisecond))
defer cancel()
log := log.Ctx(ctx).With(
zap.Int64("collectionID", collectionID),
zap.Int64("segmentID", segmentID),
)
resp, err := broker.dataCoord.GetIndexInfos(ctx, &indexpb.GetIndexInfoRequest{
CollectionID: collectionID,
SegmentIDs: []int64{segmentID},
})
if err != nil || resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
if err == nil {
err = merr.Error(resp.GetStatus())
}
if err != nil {
log.Warn("failed to get segment index info",
zap.Error(err))
return nil, err
}
if resp.GetSegmentInfo() == nil {
err = merr.WrapErrCollectionNotFound(segmentID)
log.Warn("failed to get segment index info",
zap.Int64("collection", collectionID),
zap.Int64("segment", segmentID),
zap.Error(err))
return nil, err
}
segmentInfo, ok := resp.SegmentInfo[segmentID]
segmentInfo, ok := resp.GetSegmentInfo()[segmentID]
if !ok || len(segmentInfo.GetIndexInfos()) == 0 {
return nil, merr.WrapErrIndexNotFound()
}
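
GetIndexInfo also adopts the error-collapsing idiom that recurs throughout this commit: fold the RPC transport error and the in-band status into a single err so there is exactly one failure path to log and return. A sketch of the shape, assuming (as the surrounding changes do) that merr.Error yields nil for a success or nil status; the collapse name is illustrative:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/pkg/util/merr"
)

// collapse folds a transport error and an in-band status into one error,
// mirroring the "if err == nil { err = merr.Error(resp.GetStatus()) }"
// pattern used above.
func collapse(status *commonpb.Status, rpcErr error) error {
	err := rpcErr
	if err == nil {
		err = merr.Error(status) // expected nil for success or nil status
	}
	return err
}

func main() {
	fmt.Println(collapse(nil, nil))                                  // <nil>: a nil status no longer panics
	fmt.Println(collapse(merr.Status(merr.ErrServiceNotReady), nil)) // typed service-not-ready error
}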

View File

@ -355,7 +355,7 @@ func (ex *Executor) releaseSegment(task *SegmentTask, step int) {
log.Warn("failed to release segment, it may be a false failure", zap.Error(err))
return
}
if status.ErrorCode != commonpb.ErrorCode_Success {
if status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("failed to release segment", zap.String("reason", status.GetReason()))
return
}

View File

@ -72,7 +72,7 @@ func (w *remoteWorker) LoadSegments(ctx context.Context, req *querypb.LoadSegmen
zap.Error(err),
)
return err
} else if status.ErrorCode != commonpb.ErrorCode_Success {
} else if status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("failed to call LoadSegments, worker return error",
zap.String("errorCode", status.GetErrorCode().String()),
zap.String("reason", status.GetReason()),
@ -92,7 +92,7 @@ func (w *remoteWorker) ReleaseSegments(ctx context.Context, req *querypb.Release
zap.Error(err),
)
return err
} else if status.ErrorCode != commonpb.ErrorCode_Success {
} else if status.GetErrorCode() != commonpb.ErrorCode_Success {
log.Warn("failed to call ReleaseSegments, worker return error",
zap.String("errorCode", status.GetErrorCode().String()),
zap.String("reason", status.GetReason()),

View File

@ -137,18 +137,16 @@ func (node *QueryNode) GetStatistics(ctx context.Context, req *querypb.GetStatis
}
runningGp.Go(func() error {
ret, err := node.getChannelStatistics(runningCtx, req, ch)
if err == nil {
err = merr.Error(ret.GetStatus())
}
mu.Lock()
defer mu.Unlock()
if err != nil {
failRet.Status = merr.Status(err)
failRet.Status.ErrorCode = commonpb.ErrorCode_UnexpectedError
return err
}
if ret.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
failRet.Status.Reason = ret.Status.Reason
failRet.Status.ErrorCode = ret.Status.ErrorCode
return fmt.Errorf("%s", ret.Status.Reason)
}
toReduceResults = append(toReduceResults, ret)
return nil
})
@ -1041,12 +1039,12 @@ func (node *QueryNode) Query(ctx context.Context, req *querypb.QueryRequest) (*i
idx := i
runningGp.Go(func() error {
ret, err := node.queryChannel(runningCtx, req, ch)
if err == nil {
err = merr.Error(ret.GetStatus())
}
if err != nil {
return err
}
if ret.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return fmt.Errorf("%s", ret.Status.Reason)
}
toMergeResults[idx] = ret
return nil
})

View File

@ -168,14 +168,14 @@ func (suite *ServiceSuite) TestGetComponentStatesNormal() {
suite.node.session.UpdateRegistered(true)
rsp, err := suite.node.GetComponentStates(ctx)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, rsp.GetStatus().GetErrorCode())
suite.Equal(commonpb.StateCode_Healthy, rsp.State.StateCode)
// after update
suite.node.UpdateStateCode(commonpb.StateCode_Abnormal)
rsp, err = suite.node.GetComponentStates(ctx)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, rsp.GetStatus().GetErrorCode())
suite.Equal(commonpb.StateCode_Abnormal, rsp.State.StateCode)
}
@ -183,14 +183,14 @@ func (suite *ServiceSuite) TestGetTimeTiclChannel_Normal() {
ctx := context.Background()
rsp, err := suite.node.GetTimeTickChannel(ctx)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, rsp.GetStatus().GetErrorCode())
}
func (suite *ServiceSuite) TestGetStatisChannel_Normal() {
ctx := context.Background()
rsp, err := suite.node.GetStatisticsChannel(ctx)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, rsp.GetStatus().GetErrorCode())
}
func (suite *ServiceSuite) TestGetStatistics_Normal() {
@ -214,7 +214,7 @@ func (suite *ServiceSuite) TestGetStatistics_Normal() {
rsp, err := suite.node.GetStatistics(ctx, req)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, rsp.GetStatus().GetErrorCode())
}
func (suite *ServiceSuite) TestGetStatistics_Failed() {
@ -1612,7 +1612,7 @@ func (suite *ServiceSuite) TestShowConfigurations_Normal() {
resp, err := suite.node.ShowConfigurations(ctx, req)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, resp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
suite.Equal(1, len(resp.Configuations))
}
@ -1650,7 +1650,7 @@ func (suite *ServiceSuite) TestGetMetric_Normal() {
resp, err := suite.node.GetMetrics(ctx, req)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, resp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
func (suite *ServiceSuite) TestGetMetric_Failed() {
@ -1678,13 +1678,13 @@ func (suite *ServiceSuite) TestGetMetric_Failed() {
req.Request = "---"
resp, err = suite.node.GetMetrics(ctx, req)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
// node unhealthy
suite.node.UpdateStateCode(commonpb.StateCode_Abnormal)
resp, err = suite.node.GetMetrics(ctx, req)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_NotReadyServe, resp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_NotReadyServe, resp.GetStatus().GetErrorCode())
}
func (suite *ServiceSuite) TestGetDataDistribution_Normal() {
@ -1701,7 +1701,7 @@ func (suite *ServiceSuite) TestGetDataDistribution_Normal() {
resp, err := suite.node.GetDataDistribution(ctx, req)
suite.NoError(err)
suite.Equal(commonpb.ErrorCode_Success, resp.Status.ErrorCode)
suite.Equal(commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
func (suite *ServiceSuite) TestGetDataDistribution_Failed() {

View File

@ -202,7 +202,7 @@ func (b *ServerBroker) Flush(ctx context.Context, cID int64, segIDs []int64) err
return errors.New("failed to call flush to data coordinator: " + err.Error())
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(resp.Status.Reason)
return errors.New(resp.GetStatus().GetReason())
}
log.Info("flush on collection succeed", zap.Int64("collectionID", cID))
return nil
@ -251,7 +251,7 @@ func (b *ServerBroker) GetSegmentIndexState(ctx context.Context, collID UniqueID
return nil, err
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return nil, errors.New(resp.Status.Reason)
return nil, errors.New(resp.GetStatus().GetReason())
}
return resp.GetStates(), nil

View File

@ -544,7 +544,7 @@ func TestImportManager_ImportJob(t *testing.T) {
// nil request
mgr := newImportManager(context.TODO(), mockKv, idAlloc, nil, callGetSegmentStates, nil, nil)
resp := mgr.importJob(context.TODO(), nil, colID, 0)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
rowReq := &milvuspb.ImportRequest{
CollectionName: "c1",
@ -554,11 +554,11 @@ func TestImportManager_ImportJob(t *testing.T) {
// nil callImportService
resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
// row-based import not allow multiple files
resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
importServiceFunc := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
return &datapb.ImportTaskResponse{
@ -641,9 +641,9 @@ func TestImportManager_ImportJob(t *testing.T) {
for i := 0; i <= Params.RootCoordCfg.ImportMaxPendingTaskCount.GetAsInt(); i++ {
resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
if i < Params.RootCoordCfg.ImportMaxPendingTaskCount.GetAsInt()-1 {
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
} else {
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
}
}
@ -709,7 +709,7 @@ func TestImportManager_AllDataNodesBusy(t *testing.T) {
mgr := newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callGetSegmentStates, nil, nil)
for i := 0; i < len(dnList); i++ {
resp := mgr.importJob(context.TODO(), rowReq, colID, 0)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 0, len(mgr.pendingTasks))
assert.Equal(t, i+1, len(mgr.workingTasks))
}
@ -717,7 +717,7 @@ func TestImportManager_AllDataNodesBusy(t *testing.T) {
// all data nodes are busy, new task waiting in pending list
mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callGetSegmentStates, nil, nil)
resp := mgr.importJob(context.TODO(), rowReq, colID, 0)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, len(rowReq.Files), len(mgr.pendingTasks))
assert.Equal(t, 0, len(mgr.workingTasks))
@ -725,23 +725,23 @@ func TestImportManager_AllDataNodesBusy(t *testing.T) {
count = 0
mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callGetSegmentStates, nil, nil)
resp = mgr.importJob(context.TODO(), colReq, colID, 0)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 0, len(mgr.pendingTasks))
assert.Equal(t, 1, len(mgr.workingTasks))
resp = mgr.importJob(context.TODO(), colReq, colID, 0)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 0, len(mgr.pendingTasks))
assert.Equal(t, 2, len(mgr.workingTasks))
resp = mgr.importJob(context.TODO(), colReq, colID, 0)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 0, len(mgr.pendingTasks))
assert.Equal(t, 3, len(mgr.workingTasks))
// all data nodes are busy now, new task is pending
resp = mgr.importJob(context.TODO(), colReq, colID, 0)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 1, len(mgr.pendingTasks))
assert.Equal(t, 3, len(mgr.workingTasks))
}
@ -828,14 +828,14 @@ func TestImportManager_TaskState(t *testing.T) {
assert.Equal(t, int64(1000), ti.GetState().GetRowCount())
resp := mgr.getTaskState(10000)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
resp = mgr.getTaskState(2)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, commonpb.ImportState_ImportPersisted, resp.State)
resp = mgr.getTaskState(1)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, commonpb.ImportState_ImportStarted, resp.State)
info = &rootcoordpb.ImportResult{
@ -892,7 +892,7 @@ func TestImportManager_AllocFail(t *testing.T) {
}
mgr := newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callGetSegmentStates, nil, nil)
resp := mgr.importJob(context.TODO(), rowReq, colID, 0)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
assert.Equal(t, 0, len(mgr.pendingTasks))
}
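
Note: the assertions above migrate from direct field access (resp.Status.ErrorCode) to the generated getters (resp.GetStatus().GetErrorCode()). protoc-gen-go emits getters that check for a nil receiver, which is what makes the chained form safe when a response carries no status. A minimal sketch of that pattern, with illustrative type names rather than the actual generated milvus-proto code:

package statuspb

// Sketch of what protoc-gen-go generates for a message holding a Status.
// Type and field names are hypothetical; the real code is generated.

type Status struct {
	ErrorCode int32
	Reason    string
}

// Generated getters tolerate nil receivers and fall back to zero values.
func (s *Status) GetErrorCode() int32 {
	if s != nil {
		return s.ErrorCode
	}
	return 0 // the zero value corresponds to ErrorCode_Success
}

func (s *Status) GetReason() string {
	if s != nil {
		return s.Reason
	}
	return ""
}

type Response struct {
	Status *Status
}

func (r *Response) GetStatus() *Status {
	if r != nil {
		return r.Status
	}
	return nil
}

With these getters, resp.GetStatus().GetErrorCode() returns a zero value when either resp or resp.Status is nil, whereas resp.Status.ErrorCode dereferences a nil pointer and panics — the failure mode this commit removes.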

View File

@ -46,7 +46,7 @@ func Test_ListDBTask(t *testing.T) {
err = task.Execute(context.Background())
assert.Error(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.Resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, task.Resp.GetStatus().GetErrorCode())
})
t.Run("ok", func(t *testing.T) {
@ -75,6 +75,6 @@ func Test_ListDBTask(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 1, len(task.Resp.GetDbNames()))
assert.Equal(t, ret[0].Name, task.Resp.GetDbNames()[0])
assert.Equal(t, commonpb.ErrorCode_Success, task.Resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, task.Resp.GetStatus().GetErrorCode())
})
}

View File

@ -125,7 +125,7 @@ func (p *proxyManager) startWatchEtcd(ctx context.Context, eventCh clientv3.Watc
err2 := p.WatchProxy()
if err2 != nil {
log.Error("re watch proxy fails when etcd has a compaction error",
zap.String("etcd error", err.Error()), zap.Error(err2))
zap.Error(err), zap.Error(err2))
panic("failed to handle etcd request, exit..")
}
return
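
Note: the logging change follows the same motive. err.Error() panics when err is nil, while zap's Error field constructor degrades gracefully: it emits a no-op field for a nil error and preserves the error as structured data otherwise. A small standalone sketch of the difference (assumed setup; zap is the library already in use above):

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer func() { _ = logger.Sync() }()

	var err error // nil

	// Safe: zap.Error skips the field entirely when err is nil.
	logger.Info("nil error is skipped", zap.Error(err))

	// Would panic: calling Error() on a nil error dereferences nil.
	// logger.Info("boom", zap.String("error", err.Error()))

	err = errors.New("compaction error")
	err2 := errors.New("re-watch failed")
	// Both fields use the key "error"; zap does not deduplicate keys,
	// so both values appear in the emitted entry.
	logger.Error("re-watch proxy failed", zap.Error(err), zap.Error(err2))
}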

View File

@ -958,7 +958,7 @@ func TestRootCoord_GetMetrics(t *testing.T) {
withMetricsCacheManager())
resp, err := c.GetMetrics(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("get system info metrics from cache", func(t *testing.T) {
@ -975,7 +975,7 @@ func TestRootCoord_GetMetrics(t *testing.T) {
})
resp, err := c.GetMetrics(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("get system info metrics, cache miss", func(t *testing.T) {
@ -988,7 +988,7 @@ func TestRootCoord_GetMetrics(t *testing.T) {
c.metricsCacheManager.InvalidateSystemInfoMetrics()
resp, err := c.GetMetrics(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("get system info metrics", func(t *testing.T) {
@ -1000,7 +1000,7 @@ func TestRootCoord_GetMetrics(t *testing.T) {
withMetricsCacheManager())
resp, err := c.getSystemInfoMetrics(ctx, req)
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
}
@ -1520,13 +1520,13 @@ func TestCore_Rbac(t *testing.T) {
{
resp, err := c.GetCredential(ctx, &rootcoordpb.GetCredentialRequest{})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_NotReadyServe, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_NotReadyServe, resp.GetStatus().GetErrorCode())
}
{
resp, err := c.ListCredUsers(ctx, &milvuspb.ListCredUsersRequest{})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_NotReadyServe, resp.Status.ErrorCode)
assert.Equal(t, commonpb.ErrorCode_NotReadyServe, resp.GetStatus().GetErrorCode())
}
{
@ -1550,13 +1550,13 @@ func TestCore_Rbac(t *testing.T) {
{
resp, err := c.SelectRole(ctx, &milvuspb.SelectRoleRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
{
resp, err := c.SelectUser(ctx, &milvuspb.SelectUserRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
{
@ -1568,13 +1568,13 @@ func TestCore_Rbac(t *testing.T) {
{
resp, err := c.SelectGrant(ctx, &milvuspb.SelectGrantRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
{
resp, err := c.ListPolicy(ctx, &internalpb.ListPolicyRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
}
@ -1822,7 +1822,7 @@ func TestRootCoord_RBACError(t *testing.T) {
t.Run("get credential failed", func(t *testing.T) {
resp, err := c.GetCredential(ctx, &rootcoordpb.GetCredentialRequest{Username: "foo"})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("update credential failed", func(t *testing.T) {
resp, err := c.UpdateCredential(ctx, &internalpb.CredentialInfo{})
@ -1837,7 +1837,7 @@ func TestRootCoord_RBACError(t *testing.T) {
t.Run("list credential failed", func(t *testing.T) {
resp, err := c.ListCredUsers(ctx, &milvuspb.ListCredUsersRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
t.Run("create role failed", func(t *testing.T) {
resp, err := c.CreateRole(ctx, &milvuspb.CreateRoleRequest{Entity: &milvuspb.RoleEntity{Name: "foo"}})
@ -1871,24 +1871,24 @@ func TestRootCoord_RBACError(t *testing.T) {
{
resp, err := c.SelectRole(ctx, &milvuspb.SelectRoleRequest{Role: &milvuspb.RoleEntity{Name: "foo"}})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
{
resp, err := c.SelectRole(ctx, &milvuspb.SelectRoleRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
})
t.Run("select user failed", func(t *testing.T) {
{
resp, err := c.SelectUser(ctx, &milvuspb.SelectUserRequest{User: &milvuspb.UserEntity{Name: "foo"}})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
{
resp, err := c.SelectUser(ctx, &milvuspb.SelectUserRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
})
t.Run("operate privilege failed", func(t *testing.T) {
@ -1957,12 +1957,12 @@ func TestRootCoord_RBACError(t *testing.T) {
{
resp, err := c.SelectGrant(ctx, &milvuspb.SelectGrantRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
{
resp, err := c.SelectGrant(ctx, &milvuspb.SelectGrantRequest{Entity: &milvuspb.GrantEntity{Role: &milvuspb.RoleEntity{Name: "foo"}}})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
mockMeta := c.meta.(*mockMetaTable)
mockMeta.SelectRoleFunc = func(tenant string, entity *milvuspb.RoleEntity, includeUserInfo bool) ([]*milvuspb.RoleResult, error) {
@ -1971,12 +1971,12 @@ func TestRootCoord_RBACError(t *testing.T) {
{
resp, err := c.SelectGrant(ctx, &milvuspb.SelectGrantRequest{Entity: &milvuspb.GrantEntity{Role: &milvuspb.RoleEntity{Name: "foo"}, Object: &milvuspb.ObjectEntity{Name: "CollectionFoo"}}})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
{
resp, err := c.SelectGrant(ctx, &milvuspb.SelectGrantRequest{Entity: &milvuspb.GrantEntity{Role: &milvuspb.RoleEntity{Name: "foo"}, Object: &milvuspb.ObjectEntity{Name: "Collection"}}})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
}
mockMeta.SelectRoleFunc = func(tenant string, entity *milvuspb.RoleEntity, includeUserInfo bool) ([]*milvuspb.RoleResult, error) {
return nil, errors.New("mock error")
@ -1986,7 +1986,7 @@ func TestRootCoord_RBACError(t *testing.T) {
t.Run("list policy failed", func(t *testing.T) {
resp, err := c.ListPolicy(ctx, &internalpb.ListPolicyRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
mockMeta := c.meta.(*mockMetaTable)
mockMeta.ListPolicyFunc = func(tenant string) ([]string, error) {
@ -1994,7 +1994,7 @@ func TestRootCoord_RBACError(t *testing.T) {
}
resp, err = c.ListPolicy(ctx, &internalpb.ListPolicyRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
mockMeta.ListPolicyFunc = func(tenant string) ([]string, error) {
return []string{}, errors.New("mock error")
}
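
Note: for completeness, a hypothetical test (not part of this commit) demonstrating the panic these assertion updates guard against, reusing the sketched Response and Status types from the earlier note:

package statuspb

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNilStatusAccess(t *testing.T) {
	resp := &Response{} // Status left nil, as after a failed RPC

	// Chained getters are nil-safe and return zero values.
	assert.Equal(t, int32(0), resp.GetStatus().GetErrorCode())
	assert.Equal(t, "", resp.GetStatus().GetReason())

	// Direct field access dereferences the nil Status pointer and panics.
	assert.Panics(t, func() {
		_ = resp.Status.ErrorCode
	})
}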

View File

@ -37,7 +37,7 @@ func WaitForComponentStates(ctx context.Context, service types.Component, servic
}
if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(resp.Status.Reason)
return errors.New(resp.GetStatus().GetReason())
}
meet := false