// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rootcoord

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/milvus-io/milvus-proto/go-api/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/milvuspb"
	"github.com/milvus-io/milvus/internal/kv"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/indexpb"
	"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
	"github.com/milvus-io/milvus/internal/util/importutil"
	"github.com/milvus-io/milvus/internal/util/typeutil"
	"github.com/stretchr/testify/assert"
)

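// customKV embeds kv.MockMetaKV, presumably so that a test can shadow
// individual KV methods when it needs metastore behavior beyond what the
// stock mock provides.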
type customKV struct {
	kv.MockMetaKV
}

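// TestImportManager_NewImportManager covers the import manager's start-up
// paths: loading persisted tasks from the task store, expiring working and
// pending tasks, failure injection in the KV store and the import service,
// and shutdown of the cleanup/send-out loops when the context is done.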
func TestImportManager_NewImportManager(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	Params.RootCoordCfg.ImportTaskExpiration = 50
	Params.RootCoordCfg.ImportTaskRetention = 200
	checkPendingTasksInterval = 100
	cleanUpLoopInterval = 100
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	ti1 := &datapb.ImportTaskInfo{
		Id: 100,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPending,
		},
		CreateTs: time.Now().Unix() - 100,
	}
	ti2 := &datapb.ImportTaskInfo{
		Id: 200,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPersisted,
		},
		CreateTs: time.Now().Unix() - 100,
	}
	taskInfo1, err := proto.Marshal(ti1)
	assert.NoError(t, err)
	taskInfo2, err := proto.Marshal(ti2)
	assert.NoError(t, err)
	mockKv.Save(BuildImportTaskKey(1), "value")
	mockKv.Save(BuildImportTaskKey(100), string(taskInfo1))
	mockKv.Save(BuildImportTaskKey(200), string(taskInfo2))

	mockCallImportServiceErr := false
	callImportServiceFn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		if mockCallImportServiceErr {
			return &datapb.ImportTaskResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_Success,
				},
			}, errors.New("mock err")
		}
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}
	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}
	var wg sync.WaitGroup
	wg.Add(1)
	t.Run("working task expired", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
		assert.NotNil(t, mgr)
		_, err := mgr.loadFromTaskStore(true)
		assert.NoError(t, err)
		var wgLoop sync.WaitGroup
		wgLoop.Add(2)
		mgr.sendOutTasks(ctx)
		assert.Equal(t, 1, len(mgr.workingTasks))
		mgr.cleanupLoop(&wgLoop)
		assert.Equal(t, 0, len(mgr.workingTasks))
		mgr.sendOutTasksLoop(&wgLoop)
		wgLoop.Wait()
	})

	wg.Add(1)
	t.Run("context done", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
		assert.NotNil(t, mgr)
		mgr.init(context.TODO())
		var wgLoop sync.WaitGroup
		wgLoop.Add(2)
		mgr.cleanupLoop(&wgLoop)
		mgr.sendOutTasksLoop(&wgLoop)
		wgLoop.Wait()
	})

	wg.Add(1)
	t.Run("importManager init fail because of loadFromTaskStore fail", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
		mockKv.LoadWithPrefixMockErr = true
		defer func() {
			mockKv.LoadWithPrefixMockErr = false
		}()
		assert.NotNil(t, mgr)
		assert.Panics(t, func() {
			mgr.init(context.TODO())
		})
	})

	wg.Add(1)
	t.Run("sendOutTasks fail", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
		mockKv.SaveMockErr = true
		defer func() {
			mockKv.SaveMockErr = false
		}()
		assert.NotNil(t, mgr)
		mgr.init(context.TODO())
	})

	wg.Add(1)
	t.Run("sendOutTasks fail with save and import service errors", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
		assert.NotNil(t, mgr)
		mgr.init(context.TODO())
		func() {
			mockKv.SaveMockErr = true
			defer func() {
				mockKv.SaveMockErr = false
			}()
			mgr.sendOutTasks(context.TODO())
		}()

		func() {
			mockCallImportServiceErr = true
			defer func() {
				mockCallImportServiceErr = false
			}()
			mgr.sendOutTasks(context.TODO())
		}()
	})

	wg.Add(1)
	t.Run("pending task expired", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
		assert.NotNil(t, mgr)
		mgr.pendingTasks = append(mgr.pendingTasks, &datapb.ImportTaskInfo{
			Id: 300,
			State: &datapb.ImportTaskState{
				StateCode: commonpb.ImportState_ImportPending,
			},
			CreateTs: time.Now().Unix() + 1,
		})
		mgr.pendingTasks = append(mgr.pendingTasks, &datapb.ImportTaskInfo{
			Id: 400,
			State: &datapb.ImportTaskState{
				StateCode: commonpb.ImportState_ImportPending,
			},
			CreateTs: time.Now().Unix() - 100,
		})
		_, err := mgr.loadFromTaskStore(true)
		assert.NoError(t, err)
		var wgLoop sync.WaitGroup
		wgLoop.Add(2)
		assert.Equal(t, 2, len(mgr.pendingTasks))
		mgr.cleanupLoop(&wgLoop)
		assert.Equal(t, 1, len(mgr.pendingTasks))
		mgr.sendOutTasksLoop(&wgLoop)
		wgLoop.Wait()
	})

	wg.Add(1)
	t.Run("check init", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
		assert.NotNil(t, mgr)
		mgr.init(ctx)
		var wgLoop sync.WaitGroup
		wgLoop.Add(2)
		mgr.cleanupLoop(&wgLoop)
		mgr.sendOutTasksLoop(&wgLoop)
		time.Sleep(100 * time.Millisecond)
		wgLoop.Wait()
	})

	wg.Wait()
}

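// TestImportManager_TestSetImportTaskState verifies setImportTaskState against
// a missing task ID and checks that state updates are reflected both in memory
// and in the serialized task info stored under BuildImportTaskKey.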
func TestImportManager_TestSetImportTaskState(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	Params.RootCoordCfg.ImportTaskExpiration = 50
	Params.RootCoordCfg.ImportTaskRetention = 200
	checkPendingTasksInterval = 100
	cleanUpLoopInterval = 100
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	ti1 := &datapb.ImportTaskInfo{
		Id: 100,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPending,
		},
		CreateTs: time.Now().Unix() - 100,
	}
	ti2 := &datapb.ImportTaskInfo{
		Id: 200,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPersisted,
		},
		CreateTs: time.Now().Unix() - 100,
	}
	taskInfo1, err := proto.Marshal(ti1)
	assert.NoError(t, err)
	taskInfo2, err := proto.Marshal(ti2)
	assert.NoError(t, err)
	mockKv.Save(BuildImportTaskKey(1), "value")
	mockKv.Save(BuildImportTaskKey(100), string(taskInfo1))
	mockKv.Save(BuildImportTaskKey(200), string(taskInfo2))

	var wg sync.WaitGroup
	wg.Add(1)
	t.Run("working task expired", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, nil, nil, nil, nil, nil, nil)
		assert.NotNil(t, mgr)
		_, err := mgr.loadFromTaskStore(true)
		assert.NoError(t, err)
		// Task does not exist.
		assert.Error(t, mgr.setImportTaskState(999, commonpb.ImportState_ImportStarted))
		// Normal case: update in-mem task state.
		assert.NoError(t, mgr.setImportTaskState(100, commonpb.ImportState_ImportPersisted))
		v, err := mockKv.Load(BuildImportTaskKey(100))
		assert.NoError(t, err)
		ti := &datapb.ImportTaskInfo{}
		err = proto.Unmarshal([]byte(v), ti)
		assert.NoError(t, err)
		assert.Equal(t, ti.GetState().GetStateCode(), commonpb.ImportState_ImportPersisted)
		// Normal case: update Etcd task state.
		assert.NoError(t, mgr.setImportTaskState(200, commonpb.ImportState_ImportFailedAndCleaned))
		v, err = mockKv.Load(BuildImportTaskKey(200))
		assert.NoError(t, err)
		ti = &datapb.ImportTaskInfo{}
		err = proto.Unmarshal([]byte(v), ti)
		assert.NoError(t, err)
		assert.Equal(t, ti.GetState().GetStateCode(), commonpb.ImportState_ImportFailedAndCleaned)
	})
}

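// TestImportManager_TestEtcdCleanUp seeds three tasks, two of which are older
// than ImportTaskRetention, and checks that cleanupLoop removes exactly the
// expired entries from the metastore; it also seeds busyNodes entries to
// exercise that bookkeeping.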
func TestImportManager_TestEtcdCleanUp(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	Params.RootCoordCfg.ImportTaskExpiration = 50
	Params.RootCoordCfg.ImportTaskRetention = 200
	checkPendingTasksInterval = 100
	cleanUpLoopInterval = 100
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	ti1 := &datapb.ImportTaskInfo{
		Id: 100,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPending,
		},
		CreateTs: time.Now().Unix() - 500,
	}
	ti2 := &datapb.ImportTaskInfo{
		Id: 200,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPersisted,
		},
		CreateTs: time.Now().Unix() - 500,
	}
	ti3 := &datapb.ImportTaskInfo{
		Id: 300,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPersisted,
		},
		CreateTs: time.Now().Unix() - 100,
	}
	taskInfo3, err := proto.Marshal(ti3)
	assert.NoError(t, err)
	taskInfo1, err := proto.Marshal(ti1)
	assert.NoError(t, err)
	taskInfo2, err := proto.Marshal(ti2)
	assert.NoError(t, err)
	mockKv.Save(BuildImportTaskKey(100), string(taskInfo1))
	mockKv.Save(BuildImportTaskKey(200), string(taskInfo2))
	mockKv.Save(BuildImportTaskKey(300), string(taskInfo3))

	mockCallImportServiceErr := false
	callImportServiceFn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		if mockCallImportServiceErr {
			return &datapb.ImportTaskResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_Success,
				},
			}, errors.New("mock err")
		}
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}

	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped, nil, nil, nil, nil)
	assert.NotNil(t, mgr)
	_, err = mgr.loadFromTaskStore(true)
	assert.NoError(t, err)
	var wgLoop sync.WaitGroup
	wgLoop.Add(2)
	keys, _, _ := mockKv.LoadWithPrefix("")
	// All 3 tasks are stored in Etcd.
	assert.Equal(t, 3, len(keys))
	mgr.busyNodes[20] = time.Now().Unix() - 20*60
	mgr.busyNodes[30] = time.Now().Unix()
	mgr.cleanupLoop(&wgLoop)
	keys, _, _ = mockKv.LoadWithPrefix("")
	// ti1 and ti2 have passed the retention period and have been cleaned up.
	assert.Equal(t, 1, len(keys))
	mgr.sendOutTasksLoop(&wgLoop)
}

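// TestImportManager_TestFlipTaskStateLoop drives flipTaskStateLoop with
// stubbed describe-index and segment-index-state callbacks, covering the
// success path plus DescribeIndex failure and index-not-exist responses.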
func TestImportManager_TestFlipTaskStateLoop(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	Params.RootCoordCfg.ImportTaskExpiration = 50
	Params.RootCoordCfg.ImportTaskRetention = 200
	checkPendingTasksInterval = 100
	cleanUpLoopInterval = 100
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	ti1 := &datapb.ImportTaskInfo{
		Id: 100,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPending,
		},
		CreateTs: time.Now().Unix() - 100,
	}
	ti2 := &datapb.ImportTaskInfo{
		Id: 200,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportPersisted,
			Segments:  []int64{201, 202, 203},
		},
		CreateTs: time.Now().Unix() - 100,
	}
	taskInfo1, err := proto.Marshal(ti1)
	assert.NoError(t, err)
	taskInfo2, err := proto.Marshal(ti2)
	assert.NoError(t, err)
	mockKv.Save(BuildImportTaskKey(1), "value")
	mockKv.Save(BuildImportTaskKey(100), string(taskInfo1))
	mockKv.Save(BuildImportTaskKey(200), string(taskInfo2))

	mockCallImportServiceErr := false
	callImportServiceFn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		if mockCallImportServiceErr {
			return &datapb.ImportTaskResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_Success,
				},
			}, errors.New("mock err")
		}
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}
	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}
	callDescribeIndex := func(ctx context.Context, colID UniqueID) (*indexpb.DescribeIndexResponse, error) {
		return &indexpb.DescribeIndexResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
			IndexInfos: []*indexpb.IndexInfo{
				{},
			},
		}, nil
	}
	callGetSegmentIndexState := func(ctx context.Context, collID UniqueID, indexName string,
		segIDs []UniqueID) ([]*indexpb.SegmentIndexState, error) {
		return []*indexpb.SegmentIndexState{
			{
				SegmentID: 200,
				State:     commonpb.IndexState_Finished,
			},
			{
				SegmentID: 201,
				State:     commonpb.IndexState_Finished,
			},
			{
				SegmentID: 202,
				State:     commonpb.IndexState_Finished,
			},
		}, nil
	}
	callUnsetIsImportingState := func(context.Context, *datapb.UnsetIsImportingStateRequest) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}

	flipTaskStateInterval = 50
	var wg sync.WaitGroup
	wg.Add(1)
	t.Run("normal case", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped,
			nil, callDescribeIndex, callGetSegmentIndexState, callUnsetIsImportingState)
		assert.NotNil(t, mgr)
		var wgLoop sync.WaitGroup
		wgLoop.Add(1)
		mgr.flipTaskStateLoop(&wgLoop)
		wgLoop.Wait()
		time.Sleep(100 * time.Millisecond)
	})

	wg.Add(1)
	t.Run("describe index fail", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		callDescribeIndex = func(ctx context.Context, colID UniqueID) (*indexpb.DescribeIndexResponse, error) {
			return &indexpb.DescribeIndexResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_UnexpectedError,
				},
			}, nil
		}
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped,
			nil, callDescribeIndex, callGetSegmentIndexState, callUnsetIsImportingState)
		assert.NotNil(t, mgr)
		var wgLoop sync.WaitGroup
		wgLoop.Add(1)
		mgr.flipTaskStateLoop(&wgLoop)
		wgLoop.Wait()
		time.Sleep(100 * time.Millisecond)
	})

	wg.Add(1)
	t.Run("describe index with index not exist", func(t *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		callDescribeIndex = func(ctx context.Context, colID UniqueID) (*indexpb.DescribeIndexResponse, error) {
			return &indexpb.DescribeIndexResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_IndexNotExist,
				},
			}, nil
		}
		mgr := newImportManager(ctx, mockKv, idAlloc, callImportServiceFn, callMarkSegmentsDropped,
			nil, callDescribeIndex, callGetSegmentIndexState, callUnsetIsImportingState)
		assert.NotNil(t, mgr)
		var wgLoop sync.WaitGroup
		wgLoop.Add(1)
		mgr.flipTaskStateLoop(&wgLoop)
		wgLoop.Wait()
		time.Sleep(100 * time.Millisecond)
	})
	wg.Wait()
}

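// TestImportManager_ImportJob walks importJob through its request validation
// (nil request, nil import service, multi-file row-based request) and checks
// how tasks are split between the pending and working lists depending on
// whether the data-node import call succeeds, fails, or succeeds only once.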
func TestImportManager_ImportJob(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	colID := int64(100)
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}

	// nil request
	mgr := newImportManager(context.TODO(), mockKv, idAlloc, nil, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp := mgr.importJob(context.TODO(), nil, colID, 0)
	assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)

	rowReq := &milvuspb.ImportRequest{
		CollectionName: "c1",
		PartitionName:  "p1",
		Files:          []string{"f1.json", "f2.json", "f3.json"},
	}

	// nil callImportService
	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)

	// row-based import does not allow multiple files
	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)

	importServiceFunc := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
			},
		}, nil
	}

	// row-based case, task count equals file count
	// since importServiceFunc returns an error, tasks are kept in the pending list
	rowReq.Files = []string{"f1.json"}
	mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.Equal(t, len(rowReq.Files), len(mgr.pendingTasks))
	assert.Equal(t, 0, len(mgr.workingTasks))

	colReq := &milvuspb.ImportRequest{
		CollectionName: "c1",
		PartitionName:  "p1",
		Files:          []string{"f1.npy", "f2.npy", "f3.npy"},
	}

	// column-based case, one request generates one task
	// since importServiceFunc returns an error, tasks are kept in the pending list
	mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp = mgr.importJob(context.TODO(), colReq, colID, 0)
	assert.Equal(t, 1, len(mgr.pendingTasks))
	assert.Equal(t, 0, len(mgr.workingTasks))

	importServiceFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}

	// row-based case, since importServiceFunc returns success, tasks are sent to the working list
	mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.Equal(t, 0, len(mgr.pendingTasks))
	assert.Equal(t, len(rowReq.Files), len(mgr.workingTasks))

	// column-based case, since importServiceFunc returns success, tasks are sent to the working list
	mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp = mgr.importJob(context.TODO(), colReq, colID, 0)
	assert.Equal(t, 0, len(mgr.pendingTasks))
	assert.Equal(t, 1, len(mgr.workingTasks))

	count := 0
	importServiceFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		if count >= 1 {
			return &datapb.ImportTaskResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_UnexpectedError,
				},
			}, nil
		}
		count++
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}

	// row-based case, since importServiceFunc returns success for only 1 task,
	// the first task is sent to the working list and 1 task is left in the pending list
	mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.Equal(t, 0, len(mgr.pendingTasks))
	assert.Equal(t, 1, len(mgr.workingTasks))
	resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.Equal(t, 1, len(mgr.pendingTasks))
	assert.Equal(t, 1, len(mgr.workingTasks))

	// the pending list already holds one task;
	// once the task count exceeds MaxPendingCount, importJob returns an error
	for i := 0; i <= MaxPendingCount; i++ {
		resp = mgr.importJob(context.TODO(), rowReq, colID, 0)
		if i < MaxPendingCount-1 {
			assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
		} else {
			assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
		}
	}
}

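// TestImportManager_AllDataNodesBusy simulates a pool of three data nodes:
// tasks land on the working list while free nodes remain and fall back to the
// pending list once every node has been marked busy.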
func TestImportManager_AllDataNodesBusy(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	colID := int64(100)
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	rowReq := &milvuspb.ImportRequest{
		CollectionName: "c1",
		PartitionName:  "p1",
		Files:          []string{"f1.json"},
	}
	colReq := &milvuspb.ImportRequest{
		CollectionName: "c1",
		PartitionName:  "p1",
		Files:          []string{"f1.npy", "f2.npy"},
		Options: []*commonpb.KeyValuePair{
			{
				Key:   importutil.Bucket,
				Value: "mybucket",
			},
		},
	}

	dnList := []int64{1, 2, 3}
	count := 0
	importServiceFunc := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		if count < len(dnList) {
			count++
			return &datapb.ImportTaskResponse{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_Success,
				},
				DatanodeId: dnList[count-1],
			}, nil
		}
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
			},
		}, nil
	}

	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}

	// each data node owns one task
	mgr := newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	for i := 0; i < len(dnList); i++ {
		resp := mgr.importJob(context.TODO(), rowReq, colID, 0)
		assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
		assert.Equal(t, 0, len(mgr.pendingTasks))
		assert.Equal(t, i+1, len(mgr.workingTasks))
	}

	// all data nodes are busy, so the new task waits in the pending list
	mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp := mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, len(rowReq.Files), len(mgr.pendingTasks))
	assert.Equal(t, 0, len(mgr.workingTasks))

	// now all data nodes are free again, so new tasks are executed instantly
	count = 0
	mgr = newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp = mgr.importJob(context.TODO(), colReq, colID, 0)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, 0, len(mgr.pendingTasks))
	assert.Equal(t, 1, len(mgr.workingTasks))

	resp = mgr.importJob(context.TODO(), colReq, colID, 0)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, 0, len(mgr.pendingTasks))
	assert.Equal(t, 2, len(mgr.workingTasks))

	resp = mgr.importJob(context.TODO(), colReq, colID, 0)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, 0, len(mgr.pendingTasks))
	assert.Equal(t, 3, len(mgr.workingTasks))

	// all data nodes are busy again, so the new task goes to the pending list
	resp = mgr.importJob(context.TODO(), colReq, colID, 0)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, 1, len(mgr.pendingTasks))
	assert.Equal(t, 3, len(mgr.workingTasks))
}

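// TestImportManager_TaskState checks updateTaskInfo and getTaskState: updating
// an unknown task fails, a persisted result updates the stored task, and a
// task already marked failed cannot be updated again.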
func TestImportManager_TaskState(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	colID := int64(100)
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	importServiceFunc := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}

	rowReq := &milvuspb.ImportRequest{
		CollectionName: "c1",
		PartitionName:  "p1",
		Files:          []string{"f1.json"},
	}

	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}

	// add 3 tasks, the mock idAlloc assigns them IDs 1, 2 and 3; make sure updateTaskInfo() works correctly
	mgr := newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	mgr.importJob(context.TODO(), rowReq, colID, 0)
	rowReq.Files = []string{"f2.json"}
	mgr.importJob(context.TODO(), rowReq, colID, 0)
	rowReq.Files = []string{"f3.json"}
	mgr.importJob(context.TODO(), rowReq, colID, 0)

	info := &rootcoordpb.ImportResult{
		TaskId: 10000,
	}
	_, err := mgr.updateTaskInfo(info)
	assert.Error(t, err)

	info = &rootcoordpb.ImportResult{
		TaskId:   2,
		RowCount: 1000,
		State:    commonpb.ImportState_ImportPersisted,
		Infos: []*commonpb.KeyValuePair{
			{
				Key:   "key1",
				Value: "value1",
			},
			{
				Key:   "failed_reason",
				Value: "some_reason",
			},
		},
	}
	ti, err := mgr.updateTaskInfo(info)
	assert.NoError(t, err)
	assert.Equal(t, int64(2), ti.GetId())
	assert.Equal(t, int64(100), ti.GetCollectionId())
	assert.Equal(t, int64(0), ti.GetPartitionId())
	assert.Equal(t, []string{"f2.json"}, ti.GetFiles())
	assert.Equal(t, commonpb.ImportState_ImportPersisted, ti.GetState().GetStateCode())
	assert.Equal(t, int64(1000), ti.GetState().GetRowCount())

	resp := mgr.getTaskState(10000)
	assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.Status.ErrorCode)

	resp = mgr.getTaskState(2)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, commonpb.ImportState_ImportPersisted, resp.State)

	resp = mgr.getTaskState(1)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, commonpb.ImportState_ImportStarted, resp.State)

	info = &rootcoordpb.ImportResult{
		TaskId:   1,
		RowCount: 1000,
		State:    commonpb.ImportState_ImportFailed,
		Infos: []*commonpb.KeyValuePair{
			{
				Key:   "key1",
				Value: "value1",
			},
			{
				Key:   "failed_reason",
				Value: "some_reason",
			},
		},
	}
	newTaskInfo, err := mgr.updateTaskInfo(info)
	assert.NoError(t, err)
	assert.Equal(t, commonpb.ImportState_ImportFailed, newTaskInfo.GetState().GetStateCode())

	newTaskInfo, err = mgr.updateTaskInfo(info)
	assert.Error(t, err)
	assert.Nil(t, newTaskInfo)
}

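// TestImportManager_AllocFail injects an ID-allocator failure and expects
// importJob to reject the request without queueing any pending task.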
func TestImportManager_AllocFail(t *testing.T) {
	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		return 0, 0, errors.New("injected failure")
	}
	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	colID := int64(100)
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}
	importServiceFunc := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}

	rowReq := &milvuspb.ImportRequest{
		CollectionName: "c1",
		PartitionName:  "p1",
		Files:          []string{"f1.json"},
	}

	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}
	mgr := newImportManager(context.TODO(), mockKv, idAlloc, importServiceFunc, callMarkSegmentsDropped, nil, nil, nil, nil)
	resp := mgr.importJob(context.TODO(), rowReq, colID, 0)
	assert.NotEqual(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, 0, len(mgr.pendingTasks))
}

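// TestImportManager_ListAllTasks queues ten pending tasks, then checks
// listAllTasks with various limits, getTaskState on a pending task, and the
// ID bookkeeping after the tasks are accepted into the working list.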
func TestImportManager_ListAllTasks(t *testing.T) {
	var countLock sync.RWMutex
	var globalCount = typeutil.UniqueID(0)

	var idAlloc = func(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
		countLock.Lock()
		defer countLock.Unlock()
		globalCount++
		return globalCount, 0, nil
	}

	Params.RootCoordCfg.ImportTaskSubPath = "test_import_task"
	colID := int64(100)
	mockKv := &kv.MockMetaKV{}
	mockKv.InMemKv = sync.Map{}

	// reject all tasks so they are kept in the pending list
	fn := func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
			},
		}, nil
	}

	rowReq := &milvuspb.ImportRequest{
		CollectionName: "c1",
		PartitionName:  "p1",
		Files:          []string{"f1.json"},
	}
	callMarkSegmentsDropped := func(ctx context.Context, segIDs []typeutil.UniqueID) (*commonpb.Status, error) {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}
	mgr := newImportManager(context.TODO(), mockKv, idAlloc, fn, callMarkSegmentsDropped, nil, nil, nil, nil)
	repeat := 10
	for i := 0; i < repeat; i++ {
		mgr.importJob(context.TODO(), rowReq, colID, 0)
	}

	// list all tasks
	tasks := mgr.listAllTasks("", int64(repeat))
	assert.Equal(t, repeat, len(tasks))
	for i := 0; i < repeat; i++ {
		assert.Equal(t, int64(i+1), tasks[i].Id)
	}

	// list few tasks
	limit := 3
	tasks = mgr.listAllTasks("", int64(limit))
	assert.Equal(t, limit, len(tasks))
	for i := 0; i < limit; i++ {
		assert.Equal(t, int64(i+repeat-limit+1), tasks[i].Id)
	}

	resp := mgr.getTaskState(1)
	assert.Equal(t, commonpb.ErrorCode_Success, resp.Status.ErrorCode)
	assert.Equal(t, commonpb.ImportState_ImportPending, resp.State)
	assert.Equal(t, int64(1), resp.Id)

	// accept tasks to the working list
	mgr.callImportService = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
			},
		}, nil
	}

	// there are 10 tasks in the working list and 1 task in the pending list, 11 tasks in total
	mgr.importJob(context.TODO(), rowReq, colID, 0)
	tasks = mgr.listAllTasks("", 100)
	assert.Equal(t, repeat+1, len(tasks))

	// the task IDs must be 1 through 11 (sequence not guaranteed)
	ids := make(map[int64]struct{})
	for i := 0; i < len(tasks); i++ {
		ids[int64(i)+1] = struct{}{}
	}
	for i := 0; i < len(tasks); i++ {
		delete(ids, tasks[i].Id)
	}
	assert.Equal(t, 0, len(ids))

	// list few tasks
	tasks = mgr.listAllTasks("", 1)
	assert.Equal(t, 1, len(tasks))

	// invalid collection name, returns empty
	tasks = mgr.listAllTasks("bad-collection-name", 1)
	assert.Equal(t, 0, len(tasks))
}

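// TestImportManager_setCollectionPartitionName stubs getCollectionName and
// verifies that the resolved names are written into the task info, and that
// resolution failures surface as errors.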
func TestImportManager_setCollectionPartitionName(t *testing.T) {
	mgr := &importManager{
		getCollectionName: func(collID, partitionID typeutil.UniqueID) (string, string, error) {
			if collID == 1 && partitionID == 2 {
				return "c1", "p1", nil
			}
			return "", "", errors.New("Error")
		},
	}

	info := &datapb.ImportTaskInfo{
		Id: 100,
		State: &datapb.ImportTaskState{
			StateCode: commonpb.ImportState_ImportStarted,
		},
		CreateTs: time.Now().Unix() - 100,
	}
	err := mgr.setCollectionPartitionName(1, 2, info)
	assert.NoError(t, err)
	assert.Equal(t, "c1", info.GetCollectionName())
	assert.Equal(t, "p1", info.GetPartitionName())

	err = mgr.setCollectionPartitionName(0, 0, info)
	assert.Error(t, err)
}

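// TestImportManager_rearrangeTasks checks that rearrangeTasks sorts responses
// by ascending task ID.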
func TestImportManager_rearrangeTasks(t *testing.T) {
	tasks := make([]*milvuspb.GetImportStateResponse, 0)
	tasks = append(tasks, &milvuspb.GetImportStateResponse{
		Id: 100,
	})
	tasks = append(tasks, &milvuspb.GetImportStateResponse{
		Id: 1,
	})
	tasks = append(tasks, &milvuspb.GetImportStateResponse{
		Id: 50,
	})
	rearrangeTasks(tasks)
	assert.Equal(t, 3, len(tasks))
	assert.Equal(t, int64(1), tasks[0].GetId())
	assert.Equal(t, int64(50), tasks[1].GetId())
	assert.Equal(t, int64(100), tasks[2].GetId())
}

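// TestImportManager_isRowbased accepts a single JSON file as row-based,
// rejects multiple or mixed JSON files, and treats npy-only file lists as
// column-based.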
func TestImportManager_isRowbased(t *testing.T) {
	mgr := &importManager{}

	files := []string{"1.json"}
	rb, err := mgr.isRowbased(files)
	assert.NoError(t, err)
	assert.True(t, rb)

	files = []string{"1.json", "2.json"}
	rb, err = mgr.isRowbased(files)
	assert.Error(t, err)
	assert.True(t, rb)

	files = []string{"1.json", "2.npy"}
	rb, err = mgr.isRowbased(files)
	assert.Error(t, err)
	assert.True(t, rb)

	files = []string{"1.npy", "2.npy"}
	rb, err = mgr.isRowbased(files)
	assert.NoError(t, err)
	assert.False(t, rb)
}