// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datacoord

import (
    "context"
    "fmt"
    "sort"
    "sync"
    "time"

    "github.com/cockroachdb/errors"
    "github.com/samber/lo"
    "go.opentelemetry.io/otel"
    "go.uber.org/atomic"
    "go.uber.org/zap"

    "github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
    "github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
    "github.com/milvus-io/milvus/internal/proto/datapb"
    "github.com/milvus-io/milvus/pkg/log"
    "github.com/milvus-io/milvus/pkg/util/conc"
    "github.com/milvus-io/milvus/pkg/util/lock"
    "github.com/milvus-io/milvus/pkg/util/merr"
    "github.com/milvus-io/milvus/pkg/util/typeutil"
)

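// compactionPlanContext abstracts the lifecycle and bookkeeping of compaction
// tasks inside datacoord; it is implemented by compactionPlanHandler.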
type compactionPlanContext interface {
    start()
    stop()
    // enqueueCompaction enqueues a compaction task and returns immediately
    enqueueCompaction(task *datapb.CompactionTask) error
    // isFull returns true if the task pool is full
    isFull() bool
    // getCompactionTasksNumBySignalID returns the number of tasks created by the given signal (trigger) ID
    getCompactionTasksNumBySignalID(signalID int64) int
    getCompactionInfo(signalID int64) *compactionInfo
    removeTasksByChannel(channel string)
}

var (
    errChannelNotWatched = errors.New("channel is not watched")
    errChannelInBuffer   = errors.New("channel is in buffer")
    errCompactionBusy    = errors.New("compaction task queue is full")
)

var _ compactionPlanContext = (*compactionPlanHandler)(nil)

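// compactionInfo is an aggregated view of all compaction tasks belonging to a
// single trigger: an overall state plus per-plan merge infos and counters.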
type compactionInfo struct {
    state        commonpb.CompactionState
    executingCnt int
    completedCnt int
    failedCnt    int
    timeoutCnt   int
    mergeInfos   map[int64]*milvuspb.CompactionMergeInfo
}

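// compactionPlanHandler schedules and tracks compaction tasks. Tasks enter
// queueTasks via submitTask, are moved to executingTasks by the schedule loop,
// and are removed once checkCompaction observes them as finished.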
type compactionPlanHandler struct {
    mu         lock.RWMutex
    queueTasks map[int64]CompactionTask // planID -> task

    executingMu    lock.RWMutex
    executingTasks map[int64]CompactionTask // planID -> task

    meta             CompactionMeta
    allocator        allocator
    chManager        ChannelManager
    sessions         SessionManager
    cluster          Cluster
    analyzeScheduler *taskScheduler
    handler          Handler

    stopCh   chan struct{}
    stopOnce sync.Once
    stopWg   sync.WaitGroup

    taskNumber *atomic.Int32
}

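// getCompactionInfo summarizes the states of all compaction tasks recorded in
// meta for the given trigger ID.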
func (c *compactionPlanHandler) getCompactionInfo(triggerID int64) *compactionInfo {
    tasks := c.meta.GetCompactionTasksByTriggerID(triggerID)
    return summaryCompactionState(tasks)
}

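// summaryCompactionState folds a slice of compaction tasks into one
// compactionInfo. In-progress states (pipelining, executing, analyzing,
// indexing, meta_saved) are all reported as executing; the overall state is
// Completed only when nothing is still executing.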
func summaryCompactionState(tasks []*datapb.CompactionTask) *compactionInfo {
    ret := &compactionInfo{}
    var executingCnt, pipeliningCnt, completedCnt, failedCnt, timeoutCnt, analyzingCnt, indexingCnt, cleanedCnt, metaSavedCnt int
    mergeInfos := make(map[int64]*milvuspb.CompactionMergeInfo)

    for _, task := range tasks {
        if task == nil {
            continue
        }
        switch task.GetState() {
        case datapb.CompactionTaskState_executing:
            executingCnt++
        case datapb.CompactionTaskState_pipelining:
            pipeliningCnt++
        case datapb.CompactionTaskState_completed:
            completedCnt++
        case datapb.CompactionTaskState_failed:
            failedCnt++
        case datapb.CompactionTaskState_timeout:
            timeoutCnt++
        case datapb.CompactionTaskState_analyzing:
            analyzingCnt++
        case datapb.CompactionTaskState_indexing:
            indexingCnt++
        case datapb.CompactionTaskState_cleaned:
            cleanedCnt++
        case datapb.CompactionTaskState_meta_saved:
            metaSavedCnt++
        default:
        }
        mergeInfos[task.GetPlanID()] = getCompactionMergeInfo(task)
    }

    ret.executingCnt = executingCnt + pipeliningCnt + analyzingCnt + indexingCnt + metaSavedCnt
    ret.completedCnt = completedCnt
    ret.timeoutCnt = timeoutCnt
    ret.failedCnt = failedCnt
    ret.mergeInfos = mergeInfos

    if ret.executingCnt != 0 {
        ret.state = commonpb.CompactionState_Executing
    } else {
        ret.state = commonpb.CompactionState_Completed
    }

    log.Info("compaction states",
        zap.String("state", ret.state.String()),
        zap.Int("executingCnt", executingCnt),
        zap.Int("pipeliningCnt", pipeliningCnt),
        zap.Int("completedCnt", completedCnt),
        zap.Int("failedCnt", failedCnt),
        zap.Int("timeoutCnt", timeoutCnt),
        zap.Int("analyzingCnt", analyzingCnt),
        zap.Int("indexingCnt", indexingCnt),
        zap.Int("cleanedCnt", cleanedCnt),
        zap.Int("metaSavedCnt", metaSavedCnt))
    return ret
}

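// getCompactionTasksNumBySignalID counts the queued and executing tasks that
// were produced by the given trigger (signal) ID.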
func (c *compactionPlanHandler) getCompactionTasksNumBySignalID(triggerID int64) int {
    cnt := 0
    c.mu.RLock()
    for _, t := range c.queueTasks {
        if t.GetTriggerID() == triggerID {
            cnt++
        }
    }
    c.mu.RUnlock()
    c.executingMu.RLock()
    for _, t := range c.executingTasks {
        if t.GetTriggerID() == triggerID {
            cnt++
        }
    }
    c.executingMu.RUnlock()
    return cnt
}

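// newCompactionPlanHandler wires a compactionPlanHandler with its collaborators.
// A minimal usage sketch (the collaborator values are assumed to come from the
// datacoord server initialization, not shown here):
//
//	h := newCompactionPlanHandler(cluster, sessionManager, channelManager, meta, alloc, analyzeScheduler, handler)
//	h.start()       // recovers tasks from meta and launches the schedule/check/clean loops
//	defer h.stop()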
func newCompactionPlanHandler(cluster Cluster, sessions SessionManager, cm ChannelManager, meta CompactionMeta, allocator allocator, analyzeScheduler *taskScheduler, handler Handler,
) *compactionPlanHandler {
    return &compactionPlanHandler{
        queueTasks:       make(map[int64]CompactionTask),
        chManager:        cm,
        meta:             meta,
        sessions:         sessions,
        allocator:        allocator,
        stopCh:           make(chan struct{}),
        cluster:          cluster,
        executingTasks:   make(map[int64]CompactionTask),
        taskNumber:       atomic.NewInt32(0),
        analyzeScheduler: analyzeScheduler,
        handler:          handler,
    }
}

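// schedule picks runnable tasks from the queue in ascending planID order,
// skipping tasks whose channel (or label, for clustering compaction) conflicts
// with a task that is already executing or was picked earlier in this round.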
func (c *compactionPlanHandler) schedule() []CompactionTask {
    c.mu.RLock()
    if len(c.queueTasks) == 0 {
        c.mu.RUnlock()
        return nil
    }
    c.mu.RUnlock()

    l0ChannelExcludes := typeutil.NewSet[string]()
    mixChannelExcludes := typeutil.NewSet[string]()
    clusterChannelExcludes := typeutil.NewSet[string]()
    mixLabelExcludes := typeutil.NewSet[string]()
    clusterLabelExcludes := typeutil.NewSet[string]()

    c.executingMu.RLock()
    for _, t := range c.executingTasks {
        switch t.GetType() {
        case datapb.CompactionType_Level0DeleteCompaction:
            l0ChannelExcludes.Insert(t.GetChannel())
        case datapb.CompactionType_MixCompaction:
            mixChannelExcludes.Insert(t.GetChannel())
            mixLabelExcludes.Insert(t.GetLabel())
        case datapb.CompactionType_ClusteringCompaction:
            clusterChannelExcludes.Insert(t.GetChannel())
            clusterLabelExcludes.Insert(t.GetLabel())
        }
    }
    c.executingMu.RUnlock()

    var picked []CompactionTask
    c.mu.RLock()
    defer c.mu.RUnlock()
    keys := lo.Keys(c.queueTasks)
    sort.SliceStable(keys, func(i, j int) bool {
        return keys[i] < keys[j]
    })
    for _, planID := range keys {
        t := c.queueTasks[planID]
        switch t.GetType() {
        case datapb.CompactionType_Level0DeleteCompaction:
            if l0ChannelExcludes.Contain(t.GetChannel()) ||
                mixChannelExcludes.Contain(t.GetChannel()) {
                continue
            }
            picked = append(picked, t)
            l0ChannelExcludes.Insert(t.GetChannel())
        case datapb.CompactionType_MixCompaction:
            if l0ChannelExcludes.Contain(t.GetChannel()) {
                continue
            }
            picked = append(picked, t)
            mixChannelExcludes.Insert(t.GetChannel())
            mixLabelExcludes.Insert(t.GetLabel())
        case datapb.CompactionType_ClusteringCompaction:
            if l0ChannelExcludes.Contain(t.GetChannel()) ||
                mixLabelExcludes.Contain(t.GetLabel()) ||
                clusterLabelExcludes.Contain(t.GetLabel()) {
                continue
            }
            picked = append(picked, t)
            clusterChannelExcludes.Insert(t.GetChannel())
            clusterLabelExcludes.Insert(t.GetLabel())
        }
    }
    return picked
}

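// start recovers tasks from meta and launches the schedule, check and clean loops.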
func (c *compactionPlanHandler) start() {
    c.loadMeta()
    c.stopWg.Add(3)
    go c.loopSchedule()
    go c.loopCheck()
    go c.loopClean()
}

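// loadMeta rebuilds the in-memory task sets from persisted compaction task
// meta: completed/cleaned/unknown tasks are skipped, tasks that still need a
// node are re-queued, and the rest go straight to the executing set.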
func (c *compactionPlanHandler) loadMeta() {
    // TODO: make this compatible with all types of compaction with persisted meta
    triggers := c.meta.(*meta).compactionTaskMeta.GetCompactionTasks()
    for _, tasks := range triggers {
        for _, task := range tasks {
            state := task.GetState()
            if state == datapb.CompactionTaskState_completed ||
                state == datapb.CompactionTaskState_cleaned ||
                state == datapb.CompactionTaskState_unknown {
                log.Info("compactionPlanHandler loadMeta abandon compactionTask",
                    zap.Int64("planID", task.GetPlanID()),
                    zap.String("State", task.GetState().String()))
                continue
            } else {
                t, err := c.createCompactTask(task)
                if err != nil {
                    log.Warn("compactionPlanHandler loadMeta create compactionTask failed",
                        zap.Int64("planID", task.GetPlanID()),
                        zap.String("State", task.GetState().String()),
                        zap.Error(err))
                    continue
                }
                if t.NeedReAssignNodeID() {
                    c.submitTask(t)
                    log.Info("compactionPlanHandler loadMeta submitTask",
                        zap.Int64("planID", t.GetPlanID()),
                        zap.Int64("triggerID", t.GetTriggerID()),
                        zap.Int64("collectionID", t.GetCollectionID()),
                        zap.String("state", t.GetState().String()))
                } else {
                    c.restoreTask(t)
                    log.Info("compactionPlanHandler loadMeta restoreTask",
                        zap.Int64("planID", t.GetPlanID()),
                        zap.Int64("triggerID", t.GetTriggerID()),
                        zap.Int64("collectionID", t.GetCollectionID()),
                        zap.String("state", t.GetState().String()))
                }
            }
        }
    }
}

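// doSchedule moves the tasks picked by schedule from the queue into the
// executing set.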
func (c *compactionPlanHandler) doSchedule() {
    picked := c.schedule()
    if len(picked) > 0 {
        c.executingMu.Lock()
        for _, t := range picked {
            c.executingTasks[t.GetPlanID()] = t
        }
        c.executingMu.Unlock()

        c.mu.Lock()
        for _, t := range picked {
            delete(c.queueTasks, t.GetPlanID())
        }
        c.mu.Unlock()
    }
}

func (c *compactionPlanHandler) loopSchedule() {
    log.Info("compactionPlanHandler start loop schedule")
    defer c.stopWg.Done()

    scheduleTicker := time.NewTicker(3 * time.Second)
    defer scheduleTicker.Stop()
    for {
        select {
        case <-c.stopCh:
            log.Info("compactionPlanHandler quit loop schedule")
            return

        case <-scheduleTicker.C:
            c.doSchedule()
        }
    }
}

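// loopCheck periodically drives executing tasks forward via checkCompaction.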
func (c *compactionPlanHandler) loopCheck() {
    interval := Params.DataCoordCfg.CompactionCheckIntervalInSeconds.GetAsDuration(time.Second)
    log.Info("compactionPlanHandler start loop check", zap.Any("check result interval", interval))
    defer c.stopWg.Done()
    checkResultTicker := time.NewTicker(interval)
    defer checkResultTicker.Stop()
    for {
        select {
        case <-c.stopCh:
            log.Info("compactionPlanHandler quit loop check")
            return

        case <-checkResultTicker.C:
            err := c.checkCompaction()
            if err != nil {
                log.Info("failed to update compaction", zap.Error(err))
            }
        }
    }
}

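// loopClean periodically garbage-collects finished compaction task meta and
// stale partition stats.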
func (c *compactionPlanHandler) loopClean() {
    defer c.stopWg.Done()
    cleanTicker := time.NewTicker(30 * time.Minute)
    defer cleanTicker.Stop()
    for {
        select {
        case <-c.stopCh:
            log.Info("Compaction handler quit loopClean")
            return
        case <-cleanTicker.C:
            c.Clean()
        }
    }
}

func (c *compactionPlanHandler) Clean() {
    c.cleanCompactionTaskMeta()
    c.cleanPartitionStats()
}

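// cleanCompactionTaskMeta drops completed/cleaned compaction tasks from meta
// once they are older than the configured drop tolerance.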
func (c *compactionPlanHandler) cleanCompactionTaskMeta() {
    // gc clustering compaction tasks
    triggers := c.meta.GetCompactionTasks()
    for _, tasks := range triggers {
        for _, task := range tasks {
            if task.State == datapb.CompactionTaskState_completed || task.State == datapb.CompactionTaskState_cleaned {
                duration := time.Since(time.Unix(task.StartTime, 0))
                if duration > Params.DataCoordCfg.CompactionDropToleranceInSeconds.GetAsDuration(time.Second) {
                    // best-effort deletion of the task meta; a failure here is retried on the next clean round
                    err := c.meta.DropCompactionTask(task)
                    if err != nil {
                        log.Warn("fail to drop task", zap.Int64("taskPlanID", task.PlanID), zap.Error(err))
                    }
                }
            }
        }
    }
}

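// cleanPartitionStats removes partition stats belonging to dropped collections
// and keeps only the two most recent versions per (collection, partition, channel).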
func (c *compactionPlanHandler) cleanPartitionStats() error {
    log.Debug("start gc partitionStats meta and files")
    // gc partition stats
    channelPartitionStatsInfos := make(map[string][]*datapb.PartitionStatsInfo)
    unusedPartStats := make([]*datapb.PartitionStatsInfo, 0)
    if c.meta.GetPartitionStatsMeta() == nil {
        return nil
    }
    infos := c.meta.GetPartitionStatsMeta().ListAllPartitionStatsInfos()
    for _, info := range infos {
        collInfo := c.meta.(*meta).GetCollection(info.GetCollectionID())
        if collInfo == nil {
            unusedPartStats = append(unusedPartStats, info)
            continue
        }
        channel := fmt.Sprintf("%d/%d/%s", info.CollectionID, info.PartitionID, info.VChannel)
        if _, ok := channelPartitionStatsInfos[channel]; !ok {
            channelPartitionStatsInfos[channel] = make([]*datapb.PartitionStatsInfo, 0)
        }
        channelPartitionStatsInfos[channel] = append(channelPartitionStatsInfos[channel], info)
    }
    log.Debug("channels with PartitionStats meta", zap.Int("len", len(channelPartitionStatsInfos)))

    for _, info := range unusedPartStats {
        log.Debug("collection has been dropped, remove partition stats",
            zap.Int64("collID", info.GetCollectionID()))
        if err := c.meta.CleanPartitionStatsInfo(info); err != nil {
            log.Warn("gcPartitionStatsInfo fail", zap.Error(err))
            return err
        }
    }

    for channel, infos := range channelPartitionStatsInfos {
        sort.Slice(infos, func(i, j int) bool {
            return infos[i].Version > infos[j].Version
        })
        log.Debug("PartitionStats in channel", zap.String("channel", channel), zap.Int("len", len(infos)))
        if len(infos) > 2 {
            for i := 2; i < len(infos); i++ {
                info := infos[i]
                if err := c.meta.CleanPartitionStatsInfo(info); err != nil {
                    log.Warn("gcPartitionStatsInfo fail", zap.Error(err))
                    return err
                }
            }
        }
    }
    return nil
}

func (c *compactionPlanHandler) stop() {
    c.stopOnce.Do(func() {
        close(c.stopCh)
    })
    c.stopWg.Wait()
}

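// removeTasksByChannel drops all queued and executing tasks bound to the given channel.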
func (c *compactionPlanHandler) removeTasksByChannel(channel string) {
    c.mu.Lock()
    for id, task := range c.queueTasks {
        log.Info("Compaction handler removing tasks by channel",
            zap.String("channel", channel), zap.Any("id", id), zap.Any("task_channel", task.GetChannel()))
        if task.GetChannel() == channel {
            log.Info("Compaction handler removing tasks by channel",
                zap.String("channel", channel),
                zap.Int64("planID", task.GetPlanID()),
                zap.Int64("node", task.GetNodeID()),
            )
            delete(c.queueTasks, id)
            c.taskNumber.Dec()
        }
    }
    c.mu.Unlock()
    c.executingMu.Lock()
    for id, task := range c.executingTasks {
        log.Info("Compaction handler removing tasks by channel",
            zap.String("channel", channel), zap.Any("id", id), zap.Any("task_channel", task.GetChannel()))
        if task.GetChannel() == channel {
            log.Info("Compaction handler removing tasks by channel",
                zap.String("channel", channel),
                zap.Int64("planID", task.GetPlanID()),
                zap.Int64("node", task.GetNodeID()),
            )
            delete(c.executingTasks, id)
            c.taskNumber.Dec()
        }
    }
    c.executingMu.Unlock()
}

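// submitTask puts a task into the scheduling queue and opens a tracing span for it.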
func (c *compactionPlanHandler) submitTask(t CompactionTask) {
    _, span := otel.Tracer(typeutil.DataCoordRole).Start(context.Background(), fmt.Sprintf("Compaction-%s", t.GetType()))
    t.SetSpan(span)
    c.mu.Lock()
    c.queueTasks[t.GetPlanID()] = t
    c.mu.Unlock()
    c.taskNumber.Add(1)
}

// restoreTask restores a task recovered from meta (etcd) directly into the executing set.
func (c *compactionPlanHandler) restoreTask(t CompactionTask) {
    _, span := otel.Tracer(typeutil.DataCoordRole).Start(context.Background(), fmt.Sprintf("Compaction-%s", t.GetType()))
    t.SetSpan(span)
    c.executingMu.Lock()
    c.executingTasks[t.GetPlanID()] = t
    c.executingMu.Unlock()
    c.taskNumber.Add(1)
}

// getCompactionTask returns the compaction task with the given planID,
// checking the queue first and then the executing set.
func (c *compactionPlanHandler) getCompactionTask(planID int64) CompactionTask {
    c.mu.RLock()
    t, ok := c.queueTasks[planID]
    if ok {
        c.mu.RUnlock()
        return t
    }
    c.mu.RUnlock()
    c.executingMu.RLock()
    t, ok = c.executingTasks[planID]
    if ok {
        c.executingMu.RUnlock()
        return t
    }
    c.executingMu.RUnlock()
    return t
}

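// enqueueCompaction builds the CompactionTask wrapper for the request, marks the
// input segments as compacting, persists the task meta and queues it for
// scheduling. It returns immediately; execution happens asynchronously.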
func (c *compactionPlanHandler) enqueueCompaction(task *datapb.CompactionTask) error {
    log := log.With(zap.Int64("planID", task.GetPlanID()), zap.Int64("triggerID", task.GetTriggerID()), zap.Int64("collectionID", task.GetCollectionID()), zap.String("type", task.GetType().String()))
    t, err := c.createCompactTask(task)
    if err != nil {
        return err
    }
    t.SetTask(t.ShadowClone(setStartTime(time.Now().Unix())))
    err = t.SaveTaskMeta()
    if err != nil {
        c.meta.SetSegmentsCompacting(t.GetInputSegments(), false)
        return err
    }
    c.submitTask(t)
    log.Info("Compaction plan submitted")
    return nil
}

// createCompactTask wraps the proto task in the proper CompactionTask implementation and
// marks its input segments as compacting; a segment can only participate in one compaction task at a time.
func (c *compactionPlanHandler) createCompactTask(t *datapb.CompactionTask) (CompactionTask, error) {
    var task CompactionTask
    switch t.GetType() {
    case datapb.CompactionType_MixCompaction:
        task = &mixCompactionTask{
            CompactionTask: t,
            meta:           c.meta,
            sessions:       c.sessions,
        }
    case datapb.CompactionType_Level0DeleteCompaction:
        task = &l0CompactionTask{
            CompactionTask: t,
            meta:           c.meta,
            sessions:       c.sessions,
        }
    case datapb.CompactionType_ClusteringCompaction:
        task = &clusteringCompactionTask{
            CompactionTask:   t,
            meta:             c.meta,
            sessions:         c.sessions,
            handler:          c.handler,
            analyzeScheduler: c.analyzeScheduler,
        }
    default:
        return nil, merr.WrapErrIllegalCompactionPlan("illegal compaction type")
    }
    exist, succeed := c.meta.CheckAndSetSegmentsCompacting(t.GetInputSegments())
    if !exist {
        return nil, merr.WrapErrIllegalCompactionPlan("segment not exist")
    }
    if !succeed {
        return nil, merr.WrapErrCompactionPlanConflict("segment is compacting")
    }
    return task, nil
}

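// assignNodeIDs assigns a DataNode with free slots to each task that still
// needs one; tasks for which no node is available are left for the next round.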
func (c *compactionPlanHandler) assignNodeIDs(tasks []CompactionTask) {
    slots := c.cluster.QuerySlots()
    if len(slots) == 0 {
        return
    }

    for _, t := range tasks {
        nodeID := c.pickAnyNode(slots)
        if nodeID == NullNodeID {
            log.Info("cannot find datanode for compaction task",
                zap.Int64("planID", t.GetPlanID()), zap.String("vchannel", t.GetChannel()))
            continue
        }
        err := t.SetNodeID(nodeID)
        if err != nil {
            log.Info("compactionHandler assignNodeID failed",
                zap.Int64("planID", t.GetPlanID()), zap.String("vchannel", t.GetChannel()), zap.Error(err))
        } else {
            log.Info("compactionHandler assignNodeID success",
                zap.Int64("planID", t.GetPlanID()), zap.String("vchannel", t.GetChannel()), zap.Any("nodeID", nodeID))
        }
    }
}

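// checkCompaction re-assigns nodes to tasks that lost theirs, advances every
// executing task one step via Process, and drops the tasks that report finished.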
func (c *compactionPlanHandler) checkCompaction() error {
    // Collect the executing tasks before calling GetCompactionState on the DataNode to prevent false failures,
    // since DataCoord might add new tasks while GetCompactionState is in flight.

    var needAssignIDTasks []CompactionTask
    c.executingMu.RLock()
    for _, t := range c.executingTasks {
        if t.NeedReAssignNodeID() {
            needAssignIDTasks = append(needAssignIDTasks, t)
        }
    }
    c.executingMu.RUnlock()
    if len(needAssignIDTasks) > 0 {
        c.assignNodeIDs(needAssignIDTasks)
    }

    var finishedTasks []CompactionTask
    c.executingMu.RLock()
    for _, t := range c.executingTasks {
        finished := t.Process()
        if finished {
            finishedTasks = append(finishedTasks, t)
        }
    }
    c.executingMu.RUnlock()

    // delete all finished
    c.executingMu.Lock()
    for _, t := range finishedTasks {
        delete(c.executingTasks, t.GetPlanID())
    }
    c.executingMu.Unlock()
    c.taskNumber.Add(-int32(len(finishedTasks)))
    return nil
}

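// pickAnyNode returns the node with the most free slots, or NullNodeID if no
// node has capacity left.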
func (c *compactionPlanHandler) pickAnyNode(nodeSlots map[int64]int64) int64 {
    var (
        nodeID   int64 = NullNodeID
        maxSlots int64 = -1
    )
    for id, slots := range nodeSlots {
        if slots > 0 && slots > maxSlots {
            nodeID = id
            maxSlots = slots
        }
    }
    return nodeID
}

func (c *compactionPlanHandler) pickShardNode(nodeSlots map[int64]int64, t CompactionTask) int64 {
    nodeID, err := c.chManager.FindWatcher(t.GetChannel())
    if err != nil {
        log.Info("failed to find watcher", zap.Int64("planID", t.GetPlanID()), zap.Error(err))
        return NullNodeID
    }

    if nodeSlots[nodeID] > 0 {
        return nodeID
    }
    return NullNodeID
}

// isFull returns true if the task pool is full
func (c *compactionPlanHandler) isFull() bool {
    return c.getTaskCount() >= Params.DataCoordCfg.CompactionMaxParallelTasks.GetAsInt()
}

func (c *compactionPlanHandler) getTaskCount() int {
    return int(c.taskNumber.Load())
}

func (c *compactionPlanHandler) getTasksByState(state datapb.CompactionTaskState) []CompactionTask {
    c.mu.RLock()
    defer c.mu.RUnlock()
    tasks := make([]CompactionTask, 0, len(c.queueTasks))
    for _, t := range c.queueTasks {
        if t.GetState() == state {
            tasks = append(tasks, t)
        }
    }
    return tasks
}

var (
    ioPool         *conc.Pool[any]
    ioPoolInitOnce sync.Once
)

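// initIOPool builds the shared IO goroutine pool, capping its size at 32
// regardless of the configured IO concurrency.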
func initIOPool() {
    capacity := Params.DataNodeCfg.IOConcurrency.GetAsInt()
    if capacity > 32 {
        capacity = 32
    }
    // error only happens with negative expiry duration or with negative pre-alloc size.
    ioPool = conc.NewPool[any](capacity)
}

func getOrCreateIOPool() *conc.Pool[any] {
    ioPoolInitOnce.Do(initIOPool)
    return ioPool
}