2021-11-10 19:03:38 +08:00
|
|
|
// Licensed to the LF AI & Data foundation under one
|
|
|
|
// or more contributor license agreements. See the NOTICE file
|
|
|
|
// distributed with this work for additional information
|
|
|
|
// regarding copyright ownership. The ASF licenses this file
|
|
|
|
// to you under the Apache License, Version 2.0 (the
|
|
|
|
// "License"); you may not use this file except in compliance
|
|
|
|
// with the License. You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package datacoord
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
|
|
|
"path"
|
2022-05-31 16:36:03 +08:00
|
|
|
"strconv"
|
2021-11-10 19:03:38 +08:00
|
|
|
"strings"
|
2023-01-04 19:37:36 +08:00
|
|
|
"sync"
|
2021-11-10 19:03:38 +08:00
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2023-02-26 11:31:49 +08:00
|
|
|
"github.com/cockroachdb/errors"
|
2023-03-04 23:21:50 +08:00
|
|
|
minio "github.com/minio/minio-go/v7"
|
2023-01-06 14:33:36 +08:00
|
|
|
"github.com/minio/minio-go/v7/pkg/credentials"
|
|
|
|
"github.com/stretchr/testify/assert"
|
2023-01-04 19:37:36 +08:00
|
|
|
"github.com/stretchr/testify/mock"
|
2023-01-06 14:33:36 +08:00
|
|
|
"github.com/stretchr/testify/require"
|
2023-12-14 19:26:39 +08:00
|
|
|
"github.com/stretchr/testify/suite"
|
2023-01-04 19:37:36 +08:00
|
|
|
|
2023-06-09 01:28:37 +08:00
|
|
|
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
|
|
|
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
|
|
|
|
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
2023-01-06 14:33:36 +08:00
|
|
|
kvmocks "github.com/milvus-io/milvus/internal/kv/mocks"
|
|
|
|
"github.com/milvus-io/milvus/internal/metastore"
|
2023-01-04 19:37:36 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/metastore/kv/datacoord"
|
2023-01-06 14:33:36 +08:00
|
|
|
catalogmocks "github.com/milvus-io/milvus/internal/metastore/mocks"
|
2023-01-04 19:37:36 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/metastore/model"
|
2023-03-04 23:21:50 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/mocks"
|
2021-11-10 19:03:38 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/proto/datapb"
|
2023-03-04 23:21:50 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/proto/indexpb"
|
2022-07-22 22:10:28 +08:00
|
|
|
"github.com/milvus-io/milvus/internal/storage"
|
2023-09-21 09:45:27 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/common"
|
2023-04-06 19:14:32 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/funcutil"
|
2023-11-30 11:00:28 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/lock"
|
2023-09-05 10:31:48 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
2023-11-30 11:00:28 +08:00
|
|
|
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
2021-11-10 19:03:38 +08:00
|
|
|
)
|
|
|
|
|
|
|
|
func Test_garbageCollector_basic(t *testing.T) {
|
|
|
|
bucketName := `datacoord-ut` + strings.ToLower(funcutil.RandomString(8))
|
|
|
|
rootPath := `gc` + funcutil.RandomString(8)
|
2023-09-21 09:45:27 +08:00
|
|
|
// TODO change to Params
|
2021-11-24 09:55:15 +08:00
|
|
|
cli, _, _, _, _, err := initUtOSSEnv(bucketName, rootPath, 0)
|
2021-11-10 19:03:38 +08:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
2022-11-03 14:41:35 +08:00
|
|
|
meta, err := newMemoryMeta()
|
2023-06-08 15:36:36 +08:00
|
|
|
assert.NoError(t, err)
|
2021-11-10 19:03:38 +08:00
|
|
|
|
|
|
|
t.Run("normal gc", func(t *testing.T) {
|
2023-01-04 19:37:36 +08:00
|
|
|
gc := newGarbageCollector(meta, newMockHandler(), GcOption{
|
2021-11-10 19:03:38 +08:00
|
|
|
cli: cli,
|
|
|
|
enabled: true,
|
|
|
|
checkInterval: time.Millisecond * 10,
|
|
|
|
missingTolerance: time.Hour * 24,
|
|
|
|
dropTolerance: time.Hour * 24,
|
|
|
|
})
|
|
|
|
gc.start()
|
|
|
|
|
|
|
|
time.Sleep(time.Millisecond * 20)
|
|
|
|
assert.NotPanics(t, func() {
|
|
|
|
gc.close()
|
|
|
|
})
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("with nil cli", func(t *testing.T) {
|
2023-01-04 19:37:36 +08:00
|
|
|
gc := newGarbageCollector(meta, newMockHandler(), GcOption{
|
2021-11-10 19:03:38 +08:00
|
|
|
cli: nil,
|
|
|
|
enabled: true,
|
|
|
|
checkInterval: time.Millisecond * 10,
|
|
|
|
missingTolerance: time.Hour * 24,
|
|
|
|
dropTolerance: time.Hour * 24,
|
|
|
|
})
|
|
|
|
assert.NotPanics(t, func() {
|
|
|
|
gc.start()
|
|
|
|
})
|
|
|
|
|
|
|
|
assert.NotPanics(t, func() {
|
|
|
|
gc.close()
|
|
|
|
})
|
|
|
|
})
|
2023-04-12 19:38:28 +08:00
|
|
|
}
|
2021-11-10 19:03:38 +08:00
|
|
|
|
2021-11-24 09:55:15 +08:00
|
|
|
func validateMinioPrefixElements(t *testing.T, cli *minio.Client, bucketName string, prefix string, elements []string) {
|
|
|
|
var current []string
|
|
|
|
for info := range cli.ListObjects(context.TODO(), bucketName, minio.ListObjectsOptions{Prefix: prefix, Recursive: true}) {
|
|
|
|
current = append(current, info.Key)
|
|
|
|
}
|
|
|
|
assert.ElementsMatch(t, elements, current)
|
|
|
|
}
|
|
|
|
|
2021-11-10 19:03:38 +08:00
|
|
|
// Test_garbageCollector_scan exercises gc.scan() and gc.clearEtcd() against a
// live object-storage bucket seeded by initUtOSSEnv with 4 objects per prefix
// (index 1 of each list has a deliberately malformed segment-id path).
// NOTE(review): the subtests share `meta` and the bucket contents, so they are
// order-dependent — do not reorder or parallelize them.
func Test_garbageCollector_scan(t *testing.T) {
	bucketName := `datacoord-ut` + strings.ToLower(funcutil.RandomString(8))
	rootPath := `gc` + funcutil.RandomString(8)
	// TODO change to Params
	cli, inserts, stats, delta, others, err := initUtOSSEnv(bucketName, rootPath, 4)
	require.NoError(t, err)

	meta, err := newMemoryMeta()
	assert.NoError(t, err)

	t.Run("key is reference", func(t *testing.T) {
		gc := newGarbageCollector(meta, newMockHandler(), GcOption{
			cli:              cli,
			enabled:          true,
			checkInterval:    time.Minute * 30,
			missingTolerance: time.Hour * 24,
			dropTolerance:    time.Hour * 24,
		})
		gc.scan()

		// nothing is older than the tolerances yet: every object must survive
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentInsertLogPath), inserts)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentStatslogPath), stats)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentDeltaLogPath), delta)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, `indexes`), others)
		gc.close()
	})

	t.Run("missing all but save tolerance", func(t *testing.T) {
		gc := newGarbageCollector(meta, newMockHandler(), GcOption{
			cli:              cli,
			enabled:          true,
			checkInterval:    time.Minute * 30,
			missingTolerance: time.Hour * 24,
			dropTolerance:    time.Hour * 24,
		})
		gc.scan()

		// files are unreferenced by meta but still inside missingTolerance
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentInsertLogPath), inserts)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentStatslogPath), stats)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentDeltaLogPath), delta)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, `indexes`), others)

		gc.close()
	})
	t.Run("hit, no gc", func(t *testing.T) {
		// register a flushed segment referencing the first object of each kind
		segment := buildSegment(1, 10, 100, "ch", false)
		segment.State = commonpb.SegmentState_Flushed
		segment.Binlogs = []*datapb.FieldBinlog{getFieldBinlogPaths(0, inserts[0])}
		segment.Statslogs = []*datapb.FieldBinlog{getFieldBinlogPaths(0, stats[0])}
		segment.Deltalogs = []*datapb.FieldBinlog{getFieldBinlogPaths(0, delta[0])}
		err = meta.AddSegment(context.TODO(), segment)
		require.NoError(t, err)

		gc := newGarbageCollector(meta, newMockHandler(), GcOption{
			cli:              cli,
			enabled:          true,
			checkInterval:    time.Minute * 30,
			missingTolerance: time.Hour * 24,
			dropTolerance:    time.Hour * 24,
		})
		gc.start()
		gc.scan()
		// referenced files must never be collected
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentInsertLogPath), inserts)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentStatslogPath), stats)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentDeltaLogPath), delta)
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, `indexes`), others)

		gc.close()
	})

	t.Run("dropped gc one", func(t *testing.T) {
		// a dropped segment past dropTolerance: its files get collected
		segment := buildSegment(1, 10, 100, "ch", false)
		segment.State = commonpb.SegmentState_Dropped
		segment.DroppedAt = uint64(time.Now().Add(-time.Hour).UnixNano())
		segment.Binlogs = []*datapb.FieldBinlog{getFieldBinlogPaths(0, inserts[0])}
		segment.Statslogs = []*datapb.FieldBinlog{getFieldBinlogPaths(0, stats[0])}
		segment.Deltalogs = []*datapb.FieldBinlog{getFieldBinlogPaths(0, delta[0])}

		err = meta.AddSegment(context.TODO(), segment)
		require.NoError(t, err)

		gc := newGarbageCollector(meta, newMockHandler(), GcOption{
			cli:              cli,
			enabled:          true,
			checkInterval:    time.Minute * 30,
			missingTolerance: time.Hour * 24,
			dropTolerance:    0,
		})
		gc.clearEtcd()
		// the dropped segment's files (index 0) are gone, the rest remain
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentInsertLogPath), inserts[1:])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentStatslogPath), stats[1:])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentDeltaLogPath), delta[1:])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, `indexes`), others)

		gc.close()
	})
	t.Run("missing gc all", func(t *testing.T) {
		gc := newGarbageCollector(meta, newMockHandler(), GcOption{
			cli:              cli,
			enabled:          true,
			checkInterval:    time.Minute * 30,
			missingTolerance: 0,
			dropTolerance:    0,
		})
		gc.start()
		gc.scan()
		gc.clearEtcd()

		// bad path shall remains since datacoord cannot determine file is garbage or not if path is not valid
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentInsertLogPath), inserts[1:2])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentStatslogPath), stats[1:2])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentDeltaLogPath), delta[1:2])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, `indexes`), others)

		gc.close()
	})

	t.Run("list object with error", func(t *testing.T) {
		gc := newGarbageCollector(meta, newMockHandler(), GcOption{
			cli:              cli,
			enabled:          true,
			checkInterval:    time.Minute * 30,
			missingTolerance: 0,
			dropTolerance:    0,
		})
		gc.start()
		gc.scan()

		// bad path shall remains since datacoord cannot determine file is garbage or not if path is not valid
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentInsertLogPath), inserts[1:2])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentStatslogPath), stats[1:2])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, common.SegmentDeltaLogPath), delta[1:2])
		validateMinioPrefixElements(t, cli.Client, bucketName, path.Join(rootPath, `indexes`), others)

		gc.close()
	})

	cleanupOSS(cli.Client, bucketName, rootPath)
}
|
|
|
|
|
|
|
|
// initialize unit test sso env
|
2022-07-22 22:10:28 +08:00
|
|
|
func initUtOSSEnv(bucket, root string, n int) (mcm *storage.MinioChunkManager, inserts []string, stats []string, delta []string, other []string, err error) {
|
2023-09-05 10:31:48 +08:00
|
|
|
paramtable.Init()
|
2022-11-17 18:59:09 +08:00
|
|
|
cli, err := minio.New(Params.MinioCfg.Address.GetValue(), &minio.Options{
|
|
|
|
Creds: credentials.NewStaticV4(Params.MinioCfg.AccessKeyID.GetValue(), Params.MinioCfg.SecretAccessKey.GetValue(), ""),
|
|
|
|
Secure: Params.MinioCfg.UseSSL.GetAsBool(),
|
2021-11-10 19:03:38 +08:00
|
|
|
})
|
|
|
|
if err != nil {
|
2021-11-24 09:55:15 +08:00
|
|
|
return nil, nil, nil, nil, nil, err
|
2021-11-10 19:03:38 +08:00
|
|
|
}
|
|
|
|
has, err := cli.BucketExists(context.TODO(), bucket)
|
|
|
|
if err != nil {
|
2021-11-24 09:55:15 +08:00
|
|
|
return nil, nil, nil, nil, nil, err
|
2021-11-10 19:03:38 +08:00
|
|
|
}
|
|
|
|
if !has {
|
|
|
|
err = cli.MakeBucket(context.TODO(), bucket, minio.MakeBucketOptions{})
|
|
|
|
if err != nil {
|
2021-11-24 09:55:15 +08:00
|
|
|
return nil, nil, nil, nil, nil, err
|
2021-11-10 19:03:38 +08:00
|
|
|
}
|
|
|
|
}
|
2021-11-24 09:55:15 +08:00
|
|
|
inserts = make([]string, 0, n)
|
|
|
|
stats = make([]string, 0, n)
|
|
|
|
delta = make([]string, 0, n)
|
|
|
|
other = make([]string, 0, n)
|
|
|
|
|
2021-11-10 19:03:38 +08:00
|
|
|
content := []byte("test")
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
reader := bytes.NewReader(content)
|
2022-07-26 19:32:30 +08:00
|
|
|
// collID/partID/segID/fieldID/fileName
|
|
|
|
// [str]/id/id/string/string
|
2022-09-30 14:18:55 +08:00
|
|
|
|
|
|
|
var token string
|
2022-05-31 16:36:03 +08:00
|
|
|
if i == 1 {
|
2022-09-30 14:18:55 +08:00
|
|
|
token = path.Join(strconv.Itoa(i), strconv.Itoa(i), "error-seg-id", funcutil.RandomString(8), funcutil.RandomString(8))
|
|
|
|
} else {
|
2022-11-18 15:35:09 +08:00
|
|
|
token = path.Join(strconv.Itoa(1+i), strconv.Itoa(10+i), strconv.Itoa(100+i), funcutil.RandomString(8), funcutil.RandomString(8))
|
2022-05-31 16:36:03 +08:00
|
|
|
}
|
2021-11-24 09:55:15 +08:00
|
|
|
// insert
|
2023-09-19 10:01:23 +08:00
|
|
|
filePath := path.Join(root, common.SegmentInsertLogPath, token)
|
2021-11-24 09:55:15 +08:00
|
|
|
info, err := cli.PutObject(context.TODO(), bucket, filePath, reader, int64(len(content)), minio.PutObjectOptions{})
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, nil, nil, err
|
|
|
|
}
|
|
|
|
inserts = append(inserts, info.Key)
|
|
|
|
// stats
|
2023-09-19 10:01:23 +08:00
|
|
|
filePath = path.Join(root, common.SegmentStatslogPath, token)
|
2021-11-24 09:55:15 +08:00
|
|
|
info, err = cli.PutObject(context.TODO(), bucket, filePath, reader, int64(len(content)), minio.PutObjectOptions{})
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, nil, nil, err
|
|
|
|
}
|
|
|
|
stats = append(stats, info.Key)
|
|
|
|
|
|
|
|
// delta
|
2022-09-30 14:18:55 +08:00
|
|
|
if i == 1 {
|
|
|
|
token = path.Join(strconv.Itoa(i), strconv.Itoa(i), "error-seg-id", funcutil.RandomString(8))
|
|
|
|
} else {
|
2022-11-18 15:35:09 +08:00
|
|
|
token = path.Join(strconv.Itoa(1+i), strconv.Itoa(10+i), strconv.Itoa(100+i), funcutil.RandomString(8))
|
2022-09-30 14:18:55 +08:00
|
|
|
}
|
2023-09-19 10:01:23 +08:00
|
|
|
filePath = path.Join(root, common.SegmentDeltaLogPath, token)
|
2021-11-24 09:55:15 +08:00
|
|
|
info, err = cli.PutObject(context.TODO(), bucket, filePath, reader, int64(len(content)), minio.PutObjectOptions{})
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, nil, nil, err
|
|
|
|
}
|
|
|
|
delta = append(delta, info.Key)
|
|
|
|
|
|
|
|
// other
|
|
|
|
filePath = path.Join(root, `indexes`, token)
|
|
|
|
info, err = cli.PutObject(context.TODO(), bucket, filePath, reader, int64(len(content)), minio.PutObjectOptions{})
|
2021-11-10 19:03:38 +08:00
|
|
|
if err != nil {
|
2021-11-24 09:55:15 +08:00
|
|
|
return nil, nil, nil, nil, nil, err
|
2021-11-10 19:03:38 +08:00
|
|
|
}
|
2021-11-24 09:55:15 +08:00
|
|
|
other = append(other, info.Key)
|
2021-11-10 19:03:38 +08:00
|
|
|
}
|
2022-07-22 22:10:28 +08:00
|
|
|
mcm = &storage.MinioChunkManager{
|
|
|
|
Client: cli,
|
|
|
|
}
|
2022-09-29 16:18:56 +08:00
|
|
|
mcm.SetVar(bucket, root)
|
2022-07-22 22:10:28 +08:00
|
|
|
return mcm, inserts, stats, delta, other, nil
|
2021-11-10 19:03:38 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
func cleanupOSS(cli *minio.Client, bucket, root string) {
|
|
|
|
ch := cli.ListObjects(context.TODO(), bucket, minio.ListObjectsOptions{Prefix: root, Recursive: true})
|
|
|
|
cli.RemoveObjects(context.TODO(), bucket, ch, minio.RemoveObjectsOptions{})
|
|
|
|
cli.RemoveBucket(context.TODO(), bucket)
|
|
|
|
}
|
2023-01-04 19:37:36 +08:00
|
|
|
|
2023-01-06 14:33:36 +08:00
|
|
|
// createMetaForRecycleUnusedIndexes builds an in-memory meta fixture for the
// recycleUnusedIndexes tests: one live index, one soft-deleted index on the
// same collection, and one soft-deleted index on a second collection. The
// deleted entries are the ones the recycler is expected to drop.
func createMetaForRecycleUnusedIndexes(catalog metastore.DataCoordCatalog) *meta {
	var (
		ctx    = context.Background()
		collID = UniqueID(100)
		// partID = UniqueID(200)
		fieldID = UniqueID(300)
		indexID = UniqueID(400)
	)
	return &meta{
		RWMutex:      sync.RWMutex{},
		ctx:          ctx,
		catalog:      catalog,
		collections:  nil,
		segments:     nil,
		channelCPs:   nil,
		chunkManager: nil,
		indexes: map[UniqueID]map[UniqueID]*model.Index{
			collID: {
				// live index: must survive recycling
				indexID: {
					TenantID:        "",
					CollectionID:    collID,
					FieldID:         fieldID,
					IndexID:         indexID,
					IndexName:       "_default_idx",
					IsDeleted:       false,
					CreateTime:      10,
					TypeParams:      nil,
					IndexParams:     nil,
					IsAutoIndex:     false,
					UserIndexParams: nil,
				},
				// soft-deleted index on the same collection: recycle target
				indexID + 1: {
					TenantID:        "",
					CollectionID:    collID,
					FieldID:         fieldID + 1,
					IndexID:         indexID + 1,
					IndexName:       "_default_idx_101",
					IsDeleted:       true,
					CreateTime:      0,
					TypeParams:      nil,
					IndexParams:     nil,
					IsAutoIndex:     false,
					UserIndexParams: nil,
				},
			},
			collID + 1: {
				// soft-deleted index on a second collection: recycle target
				indexID + 10: {
					TenantID:        "",
					CollectionID:    collID + 1,
					FieldID:         fieldID + 10,
					IndexID:         indexID + 10,
					IndexName:       "index",
					IsDeleted:       true,
					CreateTime:      10,
					TypeParams:      nil,
					IndexParams:     nil,
					IsAutoIndex:     false,
					UserIndexParams: nil,
				},
			},
		},
		buildID2SegmentIndex: nil,
	}
}
|
|
|
|
|
|
|
|
func TestGarbageCollector_recycleUnusedIndexes(t *testing.T) {
|
|
|
|
t.Run("success", func(t *testing.T) {
|
2023-01-06 14:33:36 +08:00
|
|
|
catalog := catalogmocks.NewDataCoordCatalog(t)
|
|
|
|
catalog.On("DropIndex",
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
).Return(nil)
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(createMetaForRecycleUnusedIndexes(catalog), nil, GcOption{})
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedIndexes()
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("fail", func(t *testing.T) {
|
2023-01-06 14:33:36 +08:00
|
|
|
catalog := catalogmocks.NewDataCoordCatalog(t)
|
|
|
|
catalog.On("DropIndex",
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
).Return(errors.New("fail"))
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(createMetaForRecycleUnusedIndexes(catalog), nil, GcOption{})
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedIndexes()
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-01-06 14:33:36 +08:00
|
|
|
// createMetaForRecycleUnusedSegIndexes builds an in-memory meta fixture for the
// recycleUnusedSegIndexes tests: segID has full SegmentInfo, while segID+1 has
// a nil SegmentInfo — its segment index is the dangling entry the recycler is
// expected to drop.
// NOTE(review): buildID is not declared here, so it must be a package-level
// identifier — confirm against the rest of the file.
func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *meta {
	var (
		ctx    = context.Background()
		collID = UniqueID(100)
		partID = UniqueID(200)
		// fieldID = UniqueID(300)
		indexID = UniqueID(400)
		segID   = UniqueID(500)
	)
	return &meta{
		RWMutex:     sync.RWMutex{},
		ctx:         ctx,
		catalog:     catalog,
		collections: nil,
		segments: &SegmentsInfo{
			segments: map[UniqueID]*SegmentInfo{
				// healthy segment with a finished index
				segID: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "",
						NumOfRows:     1026,
						State:         commonpb.SegmentState_Flushed,
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{
						indexID: {
							SegmentID:     segID,
							CollectionID:  collID,
							PartitionID:   partID,
							NumRows:       1026,
							IndexID:       indexID,
							BuildID:       buildID,
							NodeID:        1,
							IndexVersion:  1,
							IndexState:    commonpb.IndexState_Finished,
							FailReason:    "",
							IsDeleted:     false,
							CreateTime:    10,
							IndexFileKeys: []string{"file1", "file2"},
							IndexSize:     0,
							WriteHandoff:  false,
						},
					},
				},
				// segment with nil SegmentInfo: its index entry is unused
				segID + 1: {
					SegmentInfo: nil,
					segmentIndexes: map[UniqueID]*model.SegmentIndex{
						indexID: {
							SegmentID:     segID + 1,
							CollectionID:  collID,
							PartitionID:   partID,
							NumRows:       1026,
							IndexID:       indexID,
							BuildID:       buildID + 1,
							NodeID:        1,
							IndexVersion:  1,
							IndexState:    commonpb.IndexState_Finished,
							FailReason:    "",
							IsDeleted:     false,
							CreateTime:    10,
							IndexFileKeys: []string{"file1", "file2"},
							IndexSize:     0,
							WriteHandoff:  false,
						},
					},
				},
			},
		},
		channelCPs:   nil,
		chunkManager: nil,
		indexes:      map[UniqueID]map[UniqueID]*model.Index{},
		buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
			buildID: {
				SegmentID:     segID,
				CollectionID:  collID,
				PartitionID:   partID,
				NumRows:       1026,
				IndexID:       indexID,
				BuildID:       buildID,
				NodeID:        1,
				IndexVersion:  1,
				IndexState:    commonpb.IndexState_Finished,
				FailReason:    "",
				IsDeleted:     false,
				CreateTime:    10,
				IndexFileKeys: []string{"file1", "file2"},
				IndexSize:     0,
				WriteHandoff:  false,
			},
			buildID + 1: {
				SegmentID:     segID + 1,
				CollectionID:  collID,
				PartitionID:   partID,
				NumRows:       1026,
				IndexID:       indexID,
				BuildID:       buildID + 1,
				NodeID:        1,
				IndexVersion:  1,
				IndexState:    commonpb.IndexState_Finished,
				FailReason:    "",
				IsDeleted:     false,
				CreateTime:    10,
				IndexFileKeys: []string{"file1", "file2"},
				IndexSize:     0,
				WriteHandoff:  false,
			},
		},
	}
}
|
|
|
|
|
|
|
|
func TestGarbageCollector_recycleUnusedSegIndexes(t *testing.T) {
|
|
|
|
t.Run("success", func(t *testing.T) {
|
2023-01-06 14:33:36 +08:00
|
|
|
catalog := catalogmocks.NewDataCoordCatalog(t)
|
|
|
|
catalog.On("DropSegmentIndex",
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
).Return(nil)
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(createMetaForRecycleUnusedSegIndexes(catalog), nil, GcOption{})
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedSegIndexes()
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("fail", func(t *testing.T) {
|
2023-01-06 14:33:36 +08:00
|
|
|
catalog := catalogmocks.NewDataCoordCatalog(t)
|
|
|
|
catalog.On("DropSegmentIndex",
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
mock.Anything,
|
|
|
|
).Return(errors.New("fail"))
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(createMetaForRecycleUnusedSegIndexes(catalog), nil, GcOption{})
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedSegIndexes()
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// createMetaTableForRecycleUnusedIndexFiles builds an in-memory meta fixture
// for the recycleUnusedIndexFiles tests: segID carries a Finished index build
// (buildID, with file keys), while segID+1 carries an InProgress build
// (buildID+1, no file keys yet) whose files must not be recycled.
// NOTE(review): fieldID is used below but only collID/partID/indexID/segID/
// buildID are declared here, so fieldID must be package-level — confirm.
func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta {
	var (
		ctx    = context.Background()
		collID = UniqueID(100)
		partID = UniqueID(200)
		// fieldID = UniqueID(300)
		indexID = UniqueID(400)
		segID   = UniqueID(500)
		buildID = UniqueID(600)
	)
	return &meta{
		RWMutex:     sync.RWMutex{},
		ctx:         ctx,
		catalog:     catalog,
		collections: nil,
		segments: &SegmentsInfo{
			segments: map[UniqueID]*SegmentInfo{
				// segment with a finished index build (files on disk)
				segID: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "",
						NumOfRows:     1026,
						State:         commonpb.SegmentState_Flushed,
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{
						indexID: {
							SegmentID:     segID,
							CollectionID:  collID,
							PartitionID:   partID,
							NumRows:       1026,
							IndexID:       indexID,
							BuildID:       buildID,
							NodeID:        1,
							IndexVersion:  1,
							IndexState:    commonpb.IndexState_Finished,
							FailReason:    "",
							IsDeleted:     false,
							CreateTime:    10,
							IndexFileKeys: []string{"file1", "file2"},
							IndexSize:     0,
							WriteHandoff:  false,
						},
					},
				},
				// segment whose index build is still in progress
				segID + 1: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID + 1,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "",
						NumOfRows:     1026,
						State:         commonpb.SegmentState_Flushed,
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{
						indexID: {
							SegmentID:     segID + 1,
							CollectionID:  collID,
							PartitionID:   partID,
							NumRows:       1026,
							IndexID:       indexID,
							BuildID:       buildID + 1,
							NodeID:        1,
							IndexVersion:  1,
							IndexState:    commonpb.IndexState_InProgress,
							FailReason:    "",
							IsDeleted:     false,
							CreateTime:    10,
							IndexFileKeys: nil,
							IndexSize:     0,
							WriteHandoff:  false,
						},
					},
				},
			},
		},
		indexes: map[UniqueID]map[UniqueID]*model.Index{
			collID: {
				indexID: {
					TenantID:        "",
					CollectionID:    collID,
					FieldID:         fieldID,
					IndexID:         indexID,
					IndexName:       "_default_idx",
					IsDeleted:       false,
					CreateTime:      10,
					TypeParams:      nil,
					IndexParams:     nil,
					IsAutoIndex:     false,
					UserIndexParams: nil,
				},
			},
		},
		buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
			buildID: {
				SegmentID:     segID,
				CollectionID:  collID,
				PartitionID:   partID,
				NumRows:       1026,
				IndexID:       indexID,
				BuildID:       buildID,
				NodeID:        1,
				IndexVersion:  1,
				IndexState:    commonpb.IndexState_Finished,
				FailReason:    "",
				IsDeleted:     false,
				CreateTime:    10,
				IndexFileKeys: []string{"file1", "file2"},
				IndexSize:     0,
				WriteHandoff:  false,
			},
			buildID + 1: {
				SegmentID:     segID + 1,
				CollectionID:  collID,
				PartitionID:   partID,
				NumRows:       1026,
				IndexID:       indexID,
				BuildID:       buildID + 1,
				NodeID:        1,
				IndexVersion:  1,
				IndexState:    commonpb.IndexState_InProgress,
				FailReason:    "",
				IsDeleted:     false,
				CreateTime:    10,
				IndexFileKeys: nil,
				IndexSize:     0,
				WriteHandoff:  false,
			},
		},
	}
}
|
|
|
|
|
|
|
|
func TestGarbageCollector_recycleUnusedIndexFiles(t *testing.T) {
|
|
|
|
t.Run("success", func(t *testing.T) {
|
|
|
|
cm := &mocks.ChunkManager{}
|
|
|
|
cm.EXPECT().RootPath().Return("root")
|
|
|
|
cm.EXPECT().ListWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return([]string{"a/b/c/", "a/b/600/", "a/b/601/", "a/b/602/"}, nil, nil)
|
|
|
|
cm.EXPECT().RemoveWithPrefix(mock.Anything, mock.Anything).Return(nil)
|
|
|
|
cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(nil)
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(
|
|
|
|
createMetaTableForRecycleUnusedIndexFiles(&datacoord.Catalog{MetaKv: kvmocks.NewMetaKv(t)}),
|
|
|
|
nil,
|
|
|
|
GcOption{
|
2023-01-04 19:37:36 +08:00
|
|
|
cli: cm,
|
2024-01-03 13:16:57 +08:00
|
|
|
})
|
|
|
|
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedIndexFiles()
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("list fail", func(t *testing.T) {
|
|
|
|
cm := &mocks.ChunkManager{}
|
|
|
|
cm.EXPECT().RootPath().Return("root")
|
|
|
|
cm.EXPECT().ListWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return(nil, nil, errors.New("error"))
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(
|
|
|
|
createMetaTableForRecycleUnusedIndexFiles(&datacoord.Catalog{MetaKv: kvmocks.NewMetaKv(t)}),
|
|
|
|
nil,
|
|
|
|
GcOption{
|
2023-01-04 19:37:36 +08:00
|
|
|
cli: cm,
|
2024-01-03 13:16:57 +08:00
|
|
|
})
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedIndexFiles()
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("remove fail", func(t *testing.T) {
|
|
|
|
cm := &mocks.ChunkManager{}
|
|
|
|
cm.EXPECT().RootPath().Return("root")
|
|
|
|
cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(errors.New("error"))
|
|
|
|
cm.EXPECT().ListWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return([]string{"a/b/c/", "a/b/600/", "a/b/601/", "a/b/602/"}, nil, nil)
|
|
|
|
cm.EXPECT().RemoveWithPrefix(mock.Anything, mock.Anything).Return(nil)
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(
|
|
|
|
createMetaTableForRecycleUnusedIndexFiles(&datacoord.Catalog{MetaKv: kvmocks.NewMetaKv(t)}),
|
|
|
|
nil,
|
|
|
|
GcOption{
|
2023-01-04 19:37:36 +08:00
|
|
|
cli: cm,
|
2024-01-03 13:16:57 +08:00
|
|
|
})
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedIndexFiles()
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("remove with prefix fail", func(t *testing.T) {
|
|
|
|
cm := &mocks.ChunkManager{}
|
|
|
|
cm.EXPECT().RootPath().Return("root")
|
|
|
|
cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(errors.New("error"))
|
|
|
|
cm.EXPECT().ListWithPrefix(mock.Anything, mock.Anything, mock.Anything).Return([]string{"a/b/c/", "a/b/600/", "a/b/601/", "a/b/602/"}, nil, nil)
|
|
|
|
cm.EXPECT().RemoveWithPrefix(mock.Anything, mock.Anything).Return(errors.New("error"))
|
2024-01-03 13:16:57 +08:00
|
|
|
gc := newGarbageCollector(
|
|
|
|
createMetaTableForRecycleUnusedIndexFiles(&datacoord.Catalog{MetaKv: kvmocks.NewMetaKv(t)}),
|
|
|
|
nil,
|
|
|
|
GcOption{
|
2023-01-04 19:37:36 +08:00
|
|
|
cli: cm,
|
2024-01-03 13:16:57 +08:00
|
|
|
})
|
2023-01-04 19:37:36 +08:00
|
|
|
gc.recycleUnusedIndexFiles()
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestGarbageCollector_clearETCD verifies multi-pass recycling of dropped
// segment metadata. It builds a meta holding a compaction lineage
// (A,B -> C and C,D -> E) plus three standalone dropped segments (F, G, H),
// then runs gc.clearEtcd() three times and checks which segments survive
// each pass. Segment letters map to IDs as: A=segID, B=segID+1, C=segID+2,
// D=segID+3, E=segID+4, F=segID+5, G=segID+6, H=segID+7.
func TestGarbageCollector_clearETCD(t *testing.T) {
	// Catalog mock: every persistence call the GC may issue succeeds.
	catalog := catalogmocks.NewDataCoordCatalog(t)
	catalog.On("ChannelExists",
		mock.Anything,
		mock.Anything,
	).Return(true)
	catalog.On("DropChannelCheckpoint",
		mock.Anything,
		mock.Anything,
	).Return(nil).Maybe()
	catalog.On("CreateSegmentIndex",
		mock.Anything,
		mock.Anything,
	).Return(nil)
	catalog.On("AlterSegmentIndexes",
		mock.Anything,
		mock.Anything,
	).Return(nil)
	catalog.On("DropSegment",
		mock.Anything,
		mock.Anything,
	).Return(nil)

	// Channel checkpoint at ts=1000: segments whose DML position is newer
	// than this (F and H below, ts=1200) must not be recycled.
	channelCPs := typeutil.NewConcurrentMap[string, *msgpb.MsgPosition]()
	channelCPs.Insert("dmlChannel", &msgpb.MsgPosition{Timestamp: 1000})

	m := &meta{
		catalog:        catalog,
		channelCPLocks: lock.NewKeyLock[string](),
		channelCPs:     channelCPs,
		segments: &SegmentsInfo{
			map[UniqueID]*SegmentInfo{
				// A: dropped and indexed, compacted into C together with B.
				segID: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "dmlChannel",
						NumOfRows:     5000,
						State:         commonpb.SegmentState_Dropped,
						MaxRowNum:     65536,
						DroppedAt:     0,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 900,
						},
						Binlogs: []*datapb.FieldBinlog{
							{
								FieldID: 1,
								Binlogs: []*datapb.Binlog{
									{
										LogPath: "log1",
										LogSize: 1024,
									},
								},
							},
							{
								FieldID: 2,
								Binlogs: []*datapb.Binlog{
									{
										LogPath: "log2",
										LogSize: 1024,
									},
								},
							},
						},
						Deltalogs: []*datapb.FieldBinlog{
							{
								FieldID: 1,
								Binlogs: []*datapb.Binlog{
									{
										LogPath: "del_log1",
										LogSize: 1024,
									},
								},
							},
							{
								FieldID: 2,
								Binlogs: []*datapb.Binlog{
									{
										LogPath: "del_log2",
										LogSize: 1024,
									},
								},
							},
						},
						Statslogs: []*datapb.FieldBinlog{
							{
								FieldID: 1,
								Binlogs: []*datapb.Binlog{
									{
										LogPath: "stats_log1",
										LogSize: 1024,
									},
								},
							},
						},
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{
						indexID: {
							SegmentID:     segID,
							CollectionID:  collID,
							PartitionID:   partID,
							NumRows:       5000,
							IndexID:       indexID,
							BuildID:       buildID,
							NodeID:        0,
							IndexVersion:  1,
							IndexState:    commonpb.IndexState_Finished,
							FailReason:    "",
							IsDeleted:     false,
							CreateTime:    0,
							IndexFileKeys: []string{"file1", "file2"},
							IndexSize:     1024,
							WriteHandoff:  false,
						},
					},
				},
				// B: dropped and indexed, compacted into C together with A.
				segID + 1: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID + 1,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "dmlChannel",
						NumOfRows:     5000,
						State:         commonpb.SegmentState_Dropped,
						MaxRowNum:     65536,
						DroppedAt:     0,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 900,
						},
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{
						indexID: {
							SegmentID:     segID + 1,
							CollectionID:  collID,
							PartitionID:   partID,
							NumRows:       5000,
							IndexID:       indexID,
							BuildID:       buildID + 1,
							NodeID:        0,
							IndexVersion:  1,
							IndexState:    commonpb.IndexState_Finished,
							FailReason:    "",
							IsDeleted:     false,
							CreateTime:    0,
							IndexFileKeys: []string{"file3", "file4"},
							IndexSize:     1024,
							WriteHandoff:  false,
						},
					},
				},
				// C: dropped result of compacting A and B, not indexed,
				// itself compacted (with D) into E.
				segID + 2: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID + 2,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "dmlChannel",
						NumOfRows:     10000,
						State:         commonpb.SegmentState_Dropped,
						MaxRowNum:     65536,
						DroppedAt:     10,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 900,
						},
						CompactionFrom: []int64{segID, segID + 1},
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{},
				},
				// D: dropped, not indexed, compacted (with C) into E.
				segID + 3: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID + 3,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "dmlChannel",
						NumOfRows:     2000,
						State:         commonpb.SegmentState_Dropped,
						MaxRowNum:     65536,
						DroppedAt:     10,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 900,
						},
						CompactionFrom: nil,
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{},
				},
				// E: flushed result of compacting C and D; gets an index
				// later in the test via AddSegmentIndex/FinishTask.
				segID + 4: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:            segID + 4,
						CollectionID:  collID,
						PartitionID:   partID,
						InsertChannel: "dmlChannel",
						NumOfRows:     12000,
						State:         commonpb.SegmentState_Flushed,
						MaxRowNum:     65536,
						DroppedAt:     10,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 900,
						},
						CompactionFrom: []int64{segID + 2, segID + 3},
					},
					segmentIndexes: map[UniqueID]*model.SegmentIndex{},
				},
				// F: dropped, not compacted; dml position (1200) is newer
				// than the channel checkpoint (1000).
				segID + 5: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:             segID + 5,
						CollectionID:   collID,
						PartitionID:    partID,
						InsertChannel:  "dmlChannel",
						NumOfRows:      2000,
						State:          commonpb.SegmentState_Dropped,
						MaxRowNum:      65535,
						DroppedAt:      0,
						CompactionFrom: nil,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 1200,
						},
					},
				},
				// G: compacted with its child info missing; dml position
				// (900) is older than the channel checkpoint, and DroppedAt
				// is in the future so drop tolerance alone would keep it.
				segID + 6: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:             segID + 6,
						CollectionID:   collID,
						PartitionID:    partID,
						InsertChannel:  "dmlChannel",
						NumOfRows:      2000,
						State:          commonpb.SegmentState_Dropped,
						MaxRowNum:      65535,
						DroppedAt:      uint64(time.Now().Add(time.Hour).UnixNano()),
						CompactionFrom: nil,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 900,
						},
						Compacted: true,
					},
				},
				// H: compacted with its child info missing; dml position
				// (1200) is newer than the channel checkpoint.
				segID + 7: {
					SegmentInfo: &datapb.SegmentInfo{
						ID:             segID + 7,
						CollectionID:   collID,
						PartitionID:    partID,
						InsertChannel:  "dmlChannel",
						NumOfRows:      2000,
						State:          commonpb.SegmentState_Dropped,
						MaxRowNum:      65535,
						DroppedAt:      0,
						CompactionFrom: nil,
						DmlPosition: &msgpb.MsgPosition{
							Timestamp: 1200,
						},
						Compacted: true,
					},
				},
			},
		},
		buildID2SegmentIndex: map[UniqueID]*model.SegmentIndex{
			buildID: {
				SegmentID:     segID,
				CollectionID:  collID,
				PartitionID:   partID,
				NumRows:       5000,
				IndexID:       indexID,
				BuildID:       buildID,
				NodeID:        0,
				IndexVersion:  1,
				IndexState:    commonpb.IndexState_Finished,
				FailReason:    "",
				IsDeleted:     false,
				CreateTime:    0,
				IndexFileKeys: []string{"file1", "file2"},
				IndexSize:     1024,
				WriteHandoff:  false,
			},
			buildID + 1: {
				SegmentID:     segID + 1,
				CollectionID:  collID,
				PartitionID:   partID,
				NumRows:       5000,
				IndexID:       indexID,
				BuildID:       buildID + 1,
				NodeID:        0,
				IndexVersion:  1,
				IndexState:    commonpb.IndexState_Finished,
				FailReason:    "",
				IsDeleted:     false,
				CreateTime:    0,
				IndexFileKeys: []string{"file3", "file4"},
				IndexSize:     1024,
				WriteHandoff:  false,
			},
		},
		indexes: map[UniqueID]map[UniqueID]*model.Index{
			collID: {
				indexID: {
					TenantID:        "",
					CollectionID:    collID,
					FieldID:         fieldID,
					IndexID:         indexID,
					IndexName:       indexName,
					IsDeleted:       false,
					CreateTime:      0,
					TypeParams:      nil,
					IndexParams:     nil,
					IsAutoIndex:     false,
					UserIndexParams: nil,
				},
			},
		},
		collections: map[UniqueID]*collectionInfo{
			collID: {
				ID: collID,
				Schema: &schemapb.CollectionSchema{
					Name:        "",
					Description: "",
					AutoID:      false,
					Fields: []*schemapb.FieldSchema{
						{
							FieldID:      fieldID,
							Name:         "",
							IsPrimaryKey: false,
							Description:  "",
							DataType:     schemapb.DataType_FloatVector,
							TypeParams:   nil,
							IndexParams:  nil,
							AutoID:       false,
							State:        0,
						},
					},
				},
				Partitions:     nil,
				StartPositions: nil,
				Properties:     nil,
			},
		},
	}
	cm := &mocks.ChunkManager{}
	cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(nil)
	gc := newGarbageCollector(
		m,
		newMockHandlerWithMeta(m),
		GcOption{
			cli:           cm,
			dropTolerance: 1,
		})

	// First pass.
	gc.clearEtcd()
	/*
		A    B
		 \  /
		  C    D
		   \  /
		    E

		E: flushed, not indexed, should not be GCed
		D: dropped, not indexed, should not be GCed, since E is not GCed
		C: dropped, not indexed, should not be GCed, since E is not GCed
		A: dropped, indexed, should not be GCed, since C is not indexed
		B: dropped, indexed, should not be GCed, since C is not indexed

		F: dropped, compacted is false, should not be GCed, since dml position is larger than channel cp
		G: dropped, compacted is true, missing child info, should be GCed since dml pos is less than channel cp; FAST GC does not wait for drop tolerance
		H: dropped, compacted is true, missing child info, should not be GCed since dml pos is larger than channel cp

		conclusion: only G is GCed.
	*/
	segA := gc.meta.GetSegment(segID)
	assert.NotNil(t, segA)
	segB := gc.meta.GetSegment(segID + 1)
	assert.NotNil(t, segB)
	segC := gc.meta.GetSegment(segID + 2)
	assert.NotNil(t, segC)
	segD := gc.meta.GetSegment(segID + 3)
	assert.NotNil(t, segD)
	segE := gc.meta.GetSegment(segID + 4)
	assert.NotNil(t, segE)
	segF := gc.meta.GetSegment(segID + 5)
	assert.NotNil(t, segF)
	segG := gc.meta.GetSegment(segID + 6)
	assert.Nil(t, segG)
	segH := gc.meta.GetSegment(segID + 7)
	assert.NotNil(t, segH)

	// Index segment E so that C and D become recyclable in the next pass.
	err := gc.meta.AddSegmentIndex(&model.SegmentIndex{
		SegmentID:    segID + 4,
		CollectionID: collID,
		PartitionID:  partID,
		NumRows:      12000,
		IndexID:      indexID,
		BuildID:      buildID + 4,
	})
	assert.NoError(t, err)

	err = gc.meta.FinishTask(&indexpb.IndexTaskInfo{
		BuildID:        buildID + 4,
		State:          commonpb.IndexState_Finished,
		IndexFileKeys:  []string{"file1", "file2", "file3", "file4"},
		SerializedSize: 10240,
		FailReason:     "",
	})
	assert.NoError(t, err)

	// Second pass.
	gc.clearEtcd()
	/*
		A: processed prior to C, C is not GCed yet and C is not indexed, A is not GCed in this turn
		B: processed prior to C, C is not GCed yet and C is not indexed, B is not GCed in this turn

		E: flushed, indexed, should not be GCed
		C: dropped, not indexed, should be GCed since E is indexed
		D: dropped, not indexed, should be GCed since E is indexed
	*/
	segC = gc.meta.GetSegment(segID + 2)
	assert.Nil(t, segC)
	segD = gc.meta.GetSegment(segID + 3)
	assert.Nil(t, segD)

	// Third pass.
	gc.clearEtcd()
	/*
		A: compacted became false due to C being GCed already, A should be GCed since dropTolerance is met
		B: compacted became false due to C being GCed already, B should be GCed since dropTolerance is met
	*/
	segA = gc.meta.GetSegment(segID)
	assert.Nil(t, segA)
	segB = gc.meta.GetSegment(segID + 1)
	assert.Nil(t, segB)
}
|
2023-12-14 19:26:39 +08:00
|
|
|
|
2024-01-03 13:16:57 +08:00
|
|
|
func TestGarbageCollector_removelogs(t *testing.T) {
|
|
|
|
paramtable.Init()
|
|
|
|
cm := &mocks.ChunkManager{}
|
|
|
|
gc := newGarbageCollector(
|
|
|
|
nil,
|
|
|
|
nil,
|
|
|
|
GcOption{
|
|
|
|
cli: cm,
|
|
|
|
dropTolerance: 1,
|
|
|
|
})
|
|
|
|
var logs []*datapb.Binlog
|
|
|
|
for i := 0; i < 50; i++ {
|
|
|
|
logs = append(logs, &datapb.Binlog{
|
|
|
|
LogPath: "log" + strconv.Itoa(i),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Run("success", func(t *testing.T) {
|
|
|
|
call := cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(nil)
|
|
|
|
defer call.Unset()
|
|
|
|
b := gc.removeLogs(logs)
|
|
|
|
assert.True(t, b)
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("minio not found error", func(t *testing.T) {
|
|
|
|
call := cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(minio.ErrorResponse{
|
|
|
|
Code: "NoSuchKey",
|
|
|
|
})
|
|
|
|
defer call.Unset()
|
|
|
|
b := gc.removeLogs(logs)
|
|
|
|
assert.True(t, b)
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("minio server error", func(t *testing.T) {
|
|
|
|
call := cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(minio.ErrorResponse{
|
|
|
|
Code: "Server Error",
|
|
|
|
})
|
|
|
|
defer call.Unset()
|
|
|
|
b := gc.removeLogs(logs)
|
|
|
|
assert.False(t, b)
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("other type error", func(t *testing.T) {
|
|
|
|
call := cm.EXPECT().Remove(mock.Anything, mock.Anything).Return(errors.New("other error"))
|
|
|
|
defer call.Unset()
|
|
|
|
b := gc.removeLogs(logs)
|
|
|
|
assert.False(t, b)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-12-14 19:26:39 +08:00
|
|
|
// GarbageCollectorSuite exercises the garbage collector against an
// object-storage environment prepared per test by initUtOSSEnv.
type GarbageCollectorSuite struct {
	suite.Suite

	bucketName string // randomized per test in SetupTest
	rootPath   string // randomized per-test key prefix inside the bucket

	cli *storage.MinioChunkManager
	// Object-key groups returned by initUtOSSEnv; the names suggest the
	// binlog category each group was seeded under — confirm against
	// initUtOSSEnv when relying on their exact contents.
	inserts []string
	stats   []string
	delta   []string
	others  []string

	meta *meta // in-memory meta created by newMemoryMeta in SetupTest
}
|
|
|
|
|
|
|
|
func (s *GarbageCollectorSuite) SetupTest() {
|
|
|
|
s.bucketName = `datacoord-ut` + strings.ToLower(funcutil.RandomString(8))
|
|
|
|
s.rootPath = `gc` + funcutil.RandomString(8)
|
|
|
|
|
|
|
|
var err error
|
|
|
|
s.cli, s.inserts, s.stats, s.delta, s.others, err = initUtOSSEnv(s.bucketName, s.rootPath, 4)
|
|
|
|
s.Require().NoError(err)
|
|
|
|
|
|
|
|
s.meta, err = newMemoryMeta()
|
|
|
|
s.Require().NoError(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TearDownTest releases the per-test object-storage resources created in
// SetupTest via cleanupOSS.
func (s *GarbageCollectorSuite) TearDownTest() {
	cleanupOSS(s.cli.Client, s.bucketName, s.rootPath)
}
|
|
|
|
|
|
|
|
func (s *GarbageCollectorSuite) TestPauseResume() {
|
|
|
|
s.Run("not_enabled", func() {
|
|
|
|
gc := newGarbageCollector(s.meta, newMockHandler(), GcOption{
|
|
|
|
cli: s.cli,
|
|
|
|
enabled: false,
|
|
|
|
checkInterval: time.Millisecond * 10,
|
|
|
|
missingTolerance: time.Hour * 24,
|
|
|
|
dropTolerance: time.Hour * 24,
|
|
|
|
})
|
|
|
|
|
|
|
|
gc.start()
|
|
|
|
defer gc.close()
|
|
|
|
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
err := gc.Pause(ctx, time.Second)
|
|
|
|
s.NoError(err)
|
|
|
|
|
|
|
|
err = gc.Resume(ctx)
|
|
|
|
s.Error(err)
|
|
|
|
})
|
|
|
|
|
|
|
|
s.Run("pause_then_resume", func() {
|
|
|
|
gc := newGarbageCollector(s.meta, newMockHandler(), GcOption{
|
|
|
|
cli: s.cli,
|
|
|
|
enabled: true,
|
|
|
|
checkInterval: time.Millisecond * 10,
|
|
|
|
missingTolerance: time.Hour * 24,
|
|
|
|
dropTolerance: time.Hour * 24,
|
|
|
|
})
|
|
|
|
|
|
|
|
gc.start()
|
|
|
|
defer gc.close()
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
err := gc.Pause(ctx, time.Minute)
|
|
|
|
s.NoError(err)
|
|
|
|
|
|
|
|
s.NotZero(gc.pauseUntil.Load())
|
|
|
|
|
|
|
|
err = gc.Resume(ctx)
|
|
|
|
s.NoError(err)
|
|
|
|
|
|
|
|
s.Zero(gc.pauseUntil.Load())
|
|
|
|
})
|
|
|
|
|
|
|
|
s.Run("pause_before_until", func() {
|
|
|
|
gc := newGarbageCollector(s.meta, newMockHandler(), GcOption{
|
|
|
|
cli: s.cli,
|
|
|
|
enabled: true,
|
|
|
|
checkInterval: time.Millisecond * 10,
|
|
|
|
missingTolerance: time.Hour * 24,
|
|
|
|
dropTolerance: time.Hour * 24,
|
|
|
|
})
|
|
|
|
|
|
|
|
gc.start()
|
|
|
|
defer gc.close()
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
err := gc.Pause(ctx, time.Minute)
|
|
|
|
s.NoError(err)
|
|
|
|
|
|
|
|
until := gc.pauseUntil.Load()
|
|
|
|
s.NotZero(until)
|
|
|
|
|
|
|
|
err = gc.Pause(ctx, time.Second)
|
|
|
|
s.NoError(err)
|
|
|
|
|
|
|
|
second := gc.pauseUntil.Load()
|
|
|
|
|
|
|
|
s.Equal(until, second)
|
|
|
|
})
|
|
|
|
|
|
|
|
s.Run("pause_resume_timeout", func() {
|
|
|
|
gc := newGarbageCollector(s.meta, newMockHandler(), GcOption{
|
|
|
|
cli: s.cli,
|
|
|
|
enabled: true,
|
|
|
|
checkInterval: time.Millisecond * 10,
|
|
|
|
missingTolerance: time.Hour * 24,
|
|
|
|
dropTolerance: time.Hour * 24,
|
|
|
|
})
|
|
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
|
|
|
|
defer cancel()
|
|
|
|
err := gc.Pause(ctx, time.Minute)
|
|
|
|
s.Error(err)
|
|
|
|
|
|
|
|
s.Zero(gc.pauseUntil.Load())
|
|
|
|
|
|
|
|
err = gc.Resume(ctx)
|
|
|
|
s.Error(err)
|
|
|
|
|
|
|
|
s.Zero(gc.pauseUntil.Load())
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestGarbageCollector wires GarbageCollectorSuite into the standard
// `go test` runner.
func TestGarbageCollector(t *testing.T) {
	suite.Run(t, new(GarbageCollectorSuite))
}
|