// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package querycoord

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/internalpb"
	"github.com/milvus-io/milvus/internal/proto/querypb"
	"github.com/milvus-io/milvus/internal/util/funcutil"
)
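
// genLoadCollectionTask builds a grpc-triggered loadCollectionTask for the default test
// collection, wired to the given QueryCoord's coordinator clients, cluster and meta.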
func genLoadCollectionTask(ctx context.Context, queryCoord *QueryCoord) *loadCollectionTask {
	req := &querypb.LoadCollectionRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_LoadCollection,
		},
		CollectionID: defaultCollectionID,
		Schema:       genCollectionSchema(defaultCollectionID, false),
	}
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	loadCollectionTask := &loadCollectionTask{
		baseTask:              baseTask,
		LoadCollectionRequest: req,
		rootCoord:             queryCoord.rootCoordClient,
		dataCoord:             queryCoord.dataCoordClient,
		indexCoord:            queryCoord.indexCoordClient,
		cluster:               queryCoord.cluster,
		meta:                  queryCoord.meta,
	}
	return loadCollectionTask
}
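
// genLoadPartitionTask builds a grpc-triggered loadPartitionTask that loads
// defaultPartitionID of the default test collection.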
func genLoadPartitionTask(ctx context.Context, queryCoord *QueryCoord) *loadPartitionTask {
	req := &querypb.LoadPartitionsRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_LoadPartitions,
		},
		CollectionID: defaultCollectionID,
		PartitionIDs: []UniqueID{defaultPartitionID},
		Schema:       genCollectionSchema(defaultCollectionID, false),
	}
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	loadPartitionTask := &loadPartitionTask{
		baseTask:              baseTask,
		LoadPartitionsRequest: req,
		rootCoord:             queryCoord.rootCoordClient,
		dataCoord:             queryCoord.dataCoordClient,
		indexCoord:            queryCoord.indexCoordClient,
		cluster:               queryCoord.cluster,
		meta:                  queryCoord.meta,
	}
	return loadPartitionTask
}
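
// genReleaseCollectionTask builds a grpc-triggered releaseCollectionTask for the
// default test collection.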
func genReleaseCollectionTask(ctx context.Context, queryCoord *QueryCoord) *releaseCollectionTask {
	req := &querypb.ReleaseCollectionRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_ReleaseCollection,
		},
		CollectionID: defaultCollectionID,
	}
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	releaseCollectionTask := &releaseCollectionTask{
		baseTask:                 baseTask,
		ReleaseCollectionRequest: req,
		rootCoord:                queryCoord.rootCoordClient,
		cluster:                  queryCoord.cluster,
		meta:                     queryCoord.meta,
	}

	return releaseCollectionTask
}
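
// genReleasePartitionTask builds a grpc-triggered releasePartitionTask that releases
// defaultPartitionID of the default test collection.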
func genReleasePartitionTask(ctx context.Context, queryCoord *QueryCoord) *releasePartitionTask {
	req := &querypb.ReleasePartitionsRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_ReleasePartitions,
		},
		CollectionID: defaultCollectionID,
		PartitionIDs: []UniqueID{defaultPartitionID},
	}
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	releasePartitionTask := &releasePartitionTask{
		baseTask:                 baseTask,
		ReleasePartitionsRequest: req,
		cluster:                  queryCoord.cluster,
	}

	return releasePartitionTask
}
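
// genReleaseSegmentTask builds a grpc-triggered releaseSegmentTask that releases
// defaultSegmentID of defaultPartitionID on the given node.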
func genReleaseSegmentTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *releaseSegmentTask {
	req := &querypb.ReleaseSegmentsRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_ReleaseSegments,
		},
		NodeID:       nodeID,
		CollectionID: defaultCollectionID,
		PartitionIDs: []UniqueID{defaultPartitionID},
		SegmentIDs:   []UniqueID{defaultSegmentID},
	}
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	releaseSegmentTask := &releaseSegmentTask{
		baseTask:               baseTask,
		ReleaseSegmentsRequest: req,
		cluster:                queryCoord.cluster,
	}
	return releaseSegmentTask
}
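
// genWatchDmChannelTask builds a watchDmChannelTask for the given node, attaches it as a
// child of an already finished loadCollectionTask, and registers the default collection in meta.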
func genWatchDmChannelTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *watchDmChannelTask {
	schema := genCollectionSchema(defaultCollectionID, false)
	vChannelInfo := &datapb.VchannelInfo{
		CollectionID: defaultCollectionID,
		ChannelName:  "testDmChannel",
	}
	req := &querypb.WatchDmChannelsRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_WatchDmChannels,
		},
		NodeID:       nodeID,
		CollectionID: defaultCollectionID,
		PartitionID:  defaultPartitionID,
		Schema:       schema,
		Infos:        []*datapb.VchannelInfo{vChannelInfo},
	}
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	baseTask.taskID = 100
	watchDmChannelTask := &watchDmChannelTask{
		baseTask:               baseTask,
		WatchDmChannelsRequest: req,
		cluster:                queryCoord.cluster,
		meta:                   queryCoord.meta,
		excludeNodeIDs:         []int64{},
	}

	parentReq := &querypb.LoadCollectionRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_LoadCollection,
		},
		CollectionID: defaultCollectionID,
		Schema:       genCollectionSchema(defaultCollectionID, false),
	}
	baseParentTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	baseParentTask.taskID = 10
	baseParentTask.setState(taskDone)
	parentTask := &loadCollectionTask{
		baseTask:              baseParentTask,
		LoadCollectionRequest: parentReq,
		rootCoord:             queryCoord.rootCoordClient,
		dataCoord:             queryCoord.dataCoordClient,
		indexCoord:            queryCoord.indexCoordClient,
		meta:                  queryCoord.meta,
		cluster:               queryCoord.cluster,
	}
	parentTask.setState(taskDone)
	parentTask.setResultInfo(nil)
	parentTask.addChildTask(watchDmChannelTask)
	watchDmChannelTask.setParentTask(parentTask)

	queryCoord.meta.addCollection(defaultCollectionID, schema)
	return watchDmChannelTask
}
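
// genLoadSegmentTask builds a loadSegmentTask that loads defaultSegmentID onto the given node,
// attaches it as a child of an already finished loadCollectionTask, and registers the default
// collection in meta.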
func genLoadSegmentTask(ctx context.Context, queryCoord *QueryCoord, nodeID int64) *loadSegmentTask {
	schema := genCollectionSchema(defaultCollectionID, false)
	segmentInfo := &querypb.SegmentLoadInfo{
		SegmentID:    defaultSegmentID,
		PartitionID:  defaultPartitionID,
		CollectionID: defaultCollectionID,
	}
	req := &querypb.LoadSegmentsRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_LoadSegments,
		},
		DstNodeID:    nodeID,
		Schema:       schema,
		Infos:        []*querypb.SegmentLoadInfo{segmentInfo},
		CollectionID: defaultCollectionID,
	}
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	baseTask.taskID = 100
	loadSegmentTask := &loadSegmentTask{
		baseTask:            baseTask,
		LoadSegmentsRequest: req,
		cluster:             queryCoord.cluster,
		meta:                queryCoord.meta,
		excludeNodeIDs:      []int64{},
	}

	parentReq := &querypb.LoadCollectionRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_LoadCollection,
		},
		CollectionID: defaultCollectionID,
		Schema:       genCollectionSchema(defaultCollectionID, false),
	}
	baseParentTask := newBaseTask(ctx, querypb.TriggerCondition_GrpcRequest)
	baseParentTask.taskID = 10
	baseParentTask.setState(taskDone)
	parentTask := &loadCollectionTask{
		baseTask:              baseParentTask,
		LoadCollectionRequest: parentReq,
		rootCoord:             queryCoord.rootCoordClient,
		dataCoord:             queryCoord.dataCoordClient,
		indexCoord:            queryCoord.indexCoordClient,
		meta:                  queryCoord.meta,
		cluster:               queryCoord.cluster,
	}
	parentTask.setState(taskDone)
	parentTask.setResultInfo(nil)
	parentTask.addChildTask(loadSegmentTask)
	loadSegmentTask.setParentTask(parentTask)

	queryCoord.meta.addCollection(defaultCollectionID, schema)
	return loadSegmentTask
}
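
// waitTaskFinalState blocks until the given task reaches the expected state by polling it.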
func waitTaskFinalState(t task, state taskState) {
	for {
		if t.getState() == state {
			break
		}
		// Sleep briefly between polls to avoid a tight busy-wait.
		time.Sleep(100 * time.Millisecond)
	}
}

func TestTriggerTask(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)

	t.Run("Test LoadCollection", func(t *testing.T) {
		loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)

		err = queryCoord.scheduler.processTask(loadCollectionTask)
		assert.Nil(t, err)
	})

	t.Run("Test ReleaseCollection", func(t *testing.T) {
		releaseCollectionTask := genReleaseCollectionTask(ctx, queryCoord)
		err = queryCoord.scheduler.processTask(releaseCollectionTask)
		assert.Nil(t, err)
	})

	t.Run("Test LoadPartition", func(t *testing.T) {
		loadPartitionTask := genLoadPartitionTask(ctx, queryCoord)

		err = queryCoord.scheduler.processTask(loadPartitionTask)
		assert.Nil(t, err)
	})
t.Run("Test ReleasePartition", func(t *testing.T) {
|
2021-10-11 09:54:37 +08:00
|
|
|
releasePartitionTask := genReleaseCollectionTask(ctx, queryCoord)
|
2021-08-26 14:17:54 +08:00
|
|
|
|
2021-10-11 09:54:37 +08:00
|
|
|
err = queryCoord.scheduler.processTask(releasePartitionTask)
|
2021-08-26 14:17:54 +08:00
|
|
|
assert.Nil(t, err)
|
|
|
|
})

	err = node.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_LoadCollectionAfterLoadPartition(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)

	loadPartitionTask := genLoadPartitionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadPartitionTask)
	assert.Nil(t, err)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)

	releaseCollectionTask := genReleaseCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(releaseCollectionTask)
	assert.Nil(t, err)

	err = releaseCollectionTask.waitToFinish()
	assert.Nil(t, err)

	node.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_RepeatLoadCollection(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)

	loadCollectionTask1 := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask1)
	assert.Nil(t, err)

	createDefaultPartition(ctx, queryCoord)
	loadCollectionTask2 := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask2)
	assert.Nil(t, err)

	releaseCollectionTask := genReleaseCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(releaseCollectionTask)
	assert.Nil(t, err)

	err = releaseCollectionTask.waitToFinish()
	assert.Nil(t, err)

	node.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_LoadCollectionAssignTaskFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)

	err = loadCollectionTask.waitToFinish()
	assert.NotNil(t, err)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_LoadCollectionExecuteFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	node.loadSegment = returnFailedResult
	waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)

	waitTaskFinalState(loadCollectionTask, taskFailed)

	node.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_LoadPartitionAssignTaskFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	loadPartitionTask := genLoadPartitionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadPartitionTask)
	assert.Nil(t, err)

	err = loadPartitionTask.waitToFinish()
	assert.NotNil(t, err)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_LoadPartitionExecuteFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	node.loadSegment = returnFailedResult

	waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)
	loadPartitionTask := genLoadPartitionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadPartitionTask)
	assert.Nil(t, err)

	waitTaskFinalState(loadPartitionTask, taskFailed)

	node.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_LoadPartitionExecuteFailAfterLoadCollection(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)
	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)

	waitTaskFinalState(loadCollectionTask, taskExpired)

	createDefaultPartition(ctx, queryCoord)
	node.watchDmChannels = returnFailedResult

	loadPartitionTask := genLoadPartitionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadPartitionTask)
	assert.Nil(t, err)

	waitTaskFinalState(loadPartitionTask, taskFailed)

	node.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_ReleaseCollectionExecuteFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node.releaseCollection = returnFailedResult

	waitQueryNodeOnline(queryCoord.cluster, node.queryNodeID)
	releaseCollectionTask := genReleaseCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(releaseCollectionTask)
	assert.Nil(t, err)

	waitTaskFinalState(releaseCollectionTask, taskFailed)

	node.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_LoadSegmentReschedule(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node1.loadSegment = returnFailedResult

	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)

	waitTaskFinalState(loadCollectionTask, taskExpired)

	node1.stop()
	node2.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_WatchDmChannelReschedule(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node1.watchDmChannels = returnFailedResult

	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)

	waitTaskFinalState(loadCollectionTask, taskExpired)

	node1.stop()
	node2.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_ReleaseSegmentTask(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
	releaseSegmentTask := genReleaseSegmentTask(ctx, queryCoord, node1.queryNodeID)
	queryCoord.scheduler.activateTaskChan <- releaseSegmentTask

	waitTaskFinalState(releaseSegmentTask, taskDone)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_RescheduleDmChannelWithWatchQueryChannel(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	node1.watchDmChannels = returnFailedResult
	watchDmChannelTask := genWatchDmChannelTask(ctx, queryCoord, node1.queryNodeID)
	loadCollectionTask := watchDmChannelTask.parentTask
	queryCoord.scheduler.triggerTaskQueue.addTask(loadCollectionTask)

	waitTaskFinalState(loadCollectionTask, taskExpired)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_RescheduleSegmentWithWatchQueryChannel(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)

	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	node1.loadSegment = returnFailedResult
	loadSegmentTask := genLoadSegmentTask(ctx, queryCoord, node1.queryNodeID)
	loadSegmentTask.meta.setDeltaChannel(defaultCollectionID, []*datapb.VchannelInfo{})
	loadCollectionTask := loadSegmentTask.parentTask
	queryCoord.scheduler.triggerTaskQueue.addTask(loadCollectionTask)

	waitTaskFinalState(loadCollectionTask, taskExpired)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_RescheduleSegmentEndWithFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node1.loadSegment = returnFailedResult
	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node2.loadSegment = returnFailedResult

	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	loadSegmentTask := genLoadSegmentTask(ctx, queryCoord, node1.queryNodeID)
	loadCollectionTask := loadSegmentTask.parentTask
	queryCoord.scheduler.triggerTaskQueue.addTask(loadCollectionTask)

	waitTaskFinalState(loadCollectionTask, taskFailed)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_RescheduleDmChannelsEndWithFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node1.watchDmChannels = returnFailedResult
	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	node2.watchDmChannels = returnFailedResult

	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	watchDmChannelTask := genWatchDmChannelTask(ctx, queryCoord, node1.queryNodeID)
	loadCollectionTask := watchDmChannelTask.parentTask
	queryCoord.scheduler.triggerTaskQueue.addTask(loadCollectionTask)

	waitTaskFinalState(loadCollectionTask, taskFailed)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}
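
// Test_AssignInternalTask builds 3000 LoadSegments requests with sizable binlog paths and
// checks that assignInternalTask does not merge them into a single internal task.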
func Test_AssignInternalTask(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)

	schema := genCollectionSchema(defaultCollectionID, false)
	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	loadSegmentRequests := make([]*querypb.LoadSegmentsRequest, 0)
	binlogs := make([]*datapb.FieldBinlog, 0)
	binlogs = append(binlogs, &datapb.FieldBinlog{
		FieldID: 0,
		Binlogs: []string{funcutil.RandomString(1000)},
	})
	for id := 0; id < 3000; id++ {
		segmentInfo := &querypb.SegmentLoadInfo{
			SegmentID:    UniqueID(id),
			PartitionID:  defaultPartitionID,
			CollectionID: defaultCollectionID,
			BinlogPaths:  binlogs,
		}
		req := &querypb.LoadSegmentsRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_LoadSegments,
			},
			DstNodeID: node1.queryNodeID,
			Schema:    schema,
			Infos:     []*querypb.SegmentLoadInfo{segmentInfo},
		}
		loadSegmentRequests = append(loadSegmentRequests, req)
	}

	internalTasks, err := assignInternalTask(queryCoord.loopCtx, defaultCollectionID, loadCollectionTask, queryCoord.meta, queryCoord.cluster, loadSegmentRequests, nil, nil, false, nil, nil)
	assert.Nil(t, err)

	assert.NotEqual(t, 1, len(internalTasks))

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_reverseSealedSegmentChangeInfo(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	queryCoord.scheduler.Enqueue(loadCollectionTask)
	waitTaskFinalState(loadCollectionTask, taskExpired)

	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	loadSegmentTask := genLoadSegmentTask(ctx, queryCoord, node2.queryNodeID)
	parentTask := loadSegmentTask.parentTask

	kv := &testKv{
		returnFn: failedResult,
	}
	queryCoord.meta.setKvClient(kv)

	assert.Panics(t, func() {
		updateSegmentInfoFromTask(ctx, parentTask, queryCoord.meta)
	})

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func Test_handoffSegmentFail(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)
	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)
	waitTaskFinalState(loadCollectionTask, taskExpired)

	node1.loadSegment = returnFailedResult

	infos := queryCoord.meta.showSegmentInfos(defaultCollectionID, nil)
	assert.NotEqual(t, 0, len(infos))
	segmentID := defaultSegmentID + 4
	baseTask := newBaseTask(ctx, querypb.TriggerCondition_Handoff)

	segmentInfo := &querypb.SegmentInfo{
		SegmentID:    segmentID,
		CollectionID: defaultCollectionID,
		PartitionID:  defaultPartitionID + 2,
		SegmentState: commonpb.SegmentState_Sealed,
	}
	handoffReq := &querypb.HandoffSegmentsRequest{
		Base: &commonpb.MsgBase{
			MsgType: commonpb.MsgType_HandoffSegments,
		},
		SegmentInfos: []*querypb.SegmentInfo{segmentInfo},
	}
	handoffTask := &handoffTask{
		baseTask:               baseTask,
		HandoffSegmentsRequest: handoffReq,
		dataCoord:              queryCoord.dataCoordClient,
		cluster:                queryCoord.cluster,
		meta:                   queryCoord.meta,
	}
	err = queryCoord.scheduler.Enqueue(handoffTask)
	assert.Nil(t, err)

	waitTaskFinalState(handoffTask, taskFailed)

	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}
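
// TestLoadBalanceSegmentsTask exercises load-balance triggered tasks: balancing by segment ID,
// by source node, and the failure paths for an unknown segment, an empty source node list and
// an unknown source node.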
func TestLoadBalanceSegmentsTask(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)

	t.Run("Test LoadCollection", func(t *testing.T) {
		loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)

		err = queryCoord.scheduler.Enqueue(loadCollectionTask)
		assert.Nil(t, err)
		waitTaskFinalState(loadCollectionTask, taskExpired)
	})

	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	t.Run("Test LoadBalanceBySegmentID", func(t *testing.T) {
		baseTask := newBaseTask(ctx, querypb.TriggerCondition_LoadBalance)
		loadBalanceTask := &loadBalanceTask{
			baseTask: baseTask,
			LoadBalanceRequest: &querypb.LoadBalanceRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_LoadBalanceSegments,
				},
				SourceNodeIDs:    []int64{node1.queryNodeID},
				SealedSegmentIDs: []UniqueID{defaultSegmentID},
			},
			rootCoord:  queryCoord.rootCoordClient,
			dataCoord:  queryCoord.dataCoordClient,
			indexCoord: queryCoord.indexCoordClient,
			cluster:    queryCoord.cluster,
			meta:       queryCoord.meta,
		}
		err = queryCoord.scheduler.Enqueue(loadBalanceTask)
		assert.Nil(t, err)
		waitTaskFinalState(loadBalanceTask, taskExpired)
	})

	t.Run("Test LoadBalanceByNotExistSegmentID", func(t *testing.T) {
		baseTask := newBaseTask(ctx, querypb.TriggerCondition_LoadBalance)
		loadBalanceTask := &loadBalanceTask{
			baseTask: baseTask,
			LoadBalanceRequest: &querypb.LoadBalanceRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_LoadBalanceSegments,
				},
				SourceNodeIDs:    []int64{node1.queryNodeID},
				SealedSegmentIDs: []UniqueID{defaultSegmentID + 100},
			},
			rootCoord:  queryCoord.rootCoordClient,
			dataCoord:  queryCoord.dataCoordClient,
			indexCoord: queryCoord.indexCoordClient,
			cluster:    queryCoord.cluster,
			meta:       queryCoord.meta,
		}
		err = queryCoord.scheduler.Enqueue(loadBalanceTask)
		assert.Nil(t, err)
		waitTaskFinalState(loadBalanceTask, taskFailed)
	})

	t.Run("Test LoadBalanceByNode", func(t *testing.T) {
		baseTask := newBaseTask(ctx, querypb.TriggerCondition_LoadBalance)
		loadBalanceTask := &loadBalanceTask{
			baseTask: baseTask,
			LoadBalanceRequest: &querypb.LoadBalanceRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_LoadBalanceSegments,
				},
				SourceNodeIDs: []int64{node1.queryNodeID},
			},
			rootCoord:  queryCoord.rootCoordClient,
			dataCoord:  queryCoord.dataCoordClient,
			indexCoord: queryCoord.indexCoordClient,
			cluster:    queryCoord.cluster,
			meta:       queryCoord.meta,
		}
		err = queryCoord.scheduler.Enqueue(loadBalanceTask)
		assert.Nil(t, err)
		waitTaskFinalState(loadBalanceTask, taskExpired)
	})

	t.Run("Test LoadBalanceWithEmptySourceNode", func(t *testing.T) {
		baseTask := newBaseTask(ctx, querypb.TriggerCondition_LoadBalance)
		loadBalanceTask := &loadBalanceTask{
			baseTask: baseTask,
			LoadBalanceRequest: &querypb.LoadBalanceRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_LoadBalanceSegments,
				},
			},
			rootCoord:  queryCoord.rootCoordClient,
			dataCoord:  queryCoord.dataCoordClient,
			indexCoord: queryCoord.indexCoordClient,
			cluster:    queryCoord.cluster,
			meta:       queryCoord.meta,
		}
		err = queryCoord.scheduler.Enqueue(loadBalanceTask)
		assert.Nil(t, err)
		waitTaskFinalState(loadBalanceTask, taskFailed)
	})

	t.Run("Test LoadBalanceByNotExistNode", func(t *testing.T) {
		baseTask := newBaseTask(ctx, querypb.TriggerCondition_LoadBalance)
		loadBalanceTask := &loadBalanceTask{
			baseTask: baseTask,
			LoadBalanceRequest: &querypb.LoadBalanceRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_LoadBalanceSegments,
				},
				SourceNodeIDs: []int64{node1.queryNodeID + 100},
			},
			rootCoord:  queryCoord.rootCoordClient,
			dataCoord:  queryCoord.dataCoordClient,
			indexCoord: queryCoord.indexCoordClient,
			cluster:    queryCoord.cluster,
			meta:       queryCoord.meta,
		}
		err = queryCoord.scheduler.Enqueue(loadBalanceTask)
		assert.Nil(t, err)
		waitTaskFinalState(loadBalanceTask, taskFailed)
	})

	t.Run("Test ReleaseCollection", func(t *testing.T) {
		releaseCollectionTask := genReleaseCollectionTask(ctx, queryCoord)
		err = queryCoord.scheduler.processTask(releaseCollectionTask)
		assert.Nil(t, err)
	})

	node1.stop()
	node2.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func TestLoadBalanceIndexedSegmentsTask(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)
	indexCoord := newIndexCoordMock()
	indexCoord.returnIndexFile = true
	queryCoord.indexCoordClient = indexCoord

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)

	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)
	waitTaskFinalState(loadCollectionTask, taskExpired)

	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	baseTask := newBaseTask(ctx, querypb.TriggerCondition_LoadBalance)
	loadBalanceTask := &loadBalanceTask{
		baseTask: baseTask,
		LoadBalanceRequest: &querypb.LoadBalanceRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_LoadBalanceSegments,
			},
			SourceNodeIDs:    []int64{node1.queryNodeID},
			SealedSegmentIDs: []UniqueID{defaultSegmentID},
		},
		rootCoord:  queryCoord.rootCoordClient,
		dataCoord:  queryCoord.dataCoordClient,
		indexCoord: queryCoord.indexCoordClient,
		cluster:    queryCoord.cluster,
		meta:       queryCoord.meta,
	}
	err = queryCoord.scheduler.Enqueue(loadBalanceTask)
	assert.Nil(t, err)
	waitTaskFinalState(loadBalanceTask, taskExpired)

	node1.stop()
	node2.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}

func TestLoadBalanceIndexedSegmentsAfterNodeDown(t *testing.T) {
	refreshParams()
	ctx := context.Background()
	queryCoord, err := startQueryCoord(ctx)
	assert.Nil(t, err)

	node1, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node1.queryNodeID)

	loadCollectionTask := genLoadCollectionTask(ctx, queryCoord)

	err = queryCoord.scheduler.Enqueue(loadCollectionTask)
	assert.Nil(t, err)
	waitTaskFinalState(loadCollectionTask, taskExpired)

	node2, err := startQueryNodeServer(ctx)
	assert.Nil(t, err)
	waitQueryNodeOnline(queryCoord.cluster, node2.queryNodeID)

	indexCoord := newIndexCoordMock()
	indexCoord.returnIndexFile = true
	queryCoord.indexCoordClient = indexCoord
	removeNodeSession(node1.queryNodeID)
	for {
		if len(queryCoord.meta.getSegmentInfosByNode(node1.queryNodeID)) == 0 {
			break
		}
		// Sleep briefly between polls to avoid a tight busy-wait.
		time.Sleep(100 * time.Millisecond)
	}

	node2.stop()
	queryCoord.Stop()
	err = removeAllSession()
	assert.Nil(t, err)
}
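
// TestMergeWatchDeltaChannelInfo verifies that delta channel infos are merged per channel
// name and that the earliest seek position (smallest timestamp) is kept for each channel.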
func TestMergeWatchDeltaChannelInfo(t *testing.T) {
	infos := []*datapb.VchannelInfo{
		{
			ChannelName: "test-1",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-1",
				Timestamp:   9,
			},
		},
		{
			ChannelName: "test-2",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-2",
				Timestamp:   10,
			},
		},
		{
			ChannelName: "test-1",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-1",
				Timestamp:   15,
			},
		},
		{
			ChannelName: "test-2",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-2",
				Timestamp:   16,
			},
		},
		{
			ChannelName: "test-1",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-1",
				Timestamp:   5,
			},
		},
		{
			ChannelName: "test-2",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-2",
				Timestamp:   4,
			},
		},
		{
			ChannelName: "test-1",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-1",
				Timestamp:   3,
			},
		},
		{
			ChannelName: "test-2",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-2",
				Timestamp:   5,
			},
		},
	}

	results := mergeWatchDeltaChannelInfo(infos)
	expected := []*datapb.VchannelInfo{
		{
			ChannelName: "test-1",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-1",
				Timestamp:   3,
			},
		},
		{
			ChannelName: "test-2",
			SeekPosition: &internalpb.MsgPosition{
				ChannelName: "test-2",
				Timestamp:   4,
			},
		},
	}
	assert.ElementsMatch(t, expected, results)
}