// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package proxy

import (
	"context"

	"github.com/samber/lo"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"github.com/milvus-io/milvus/internal/proto/internalpb"
	"github.com/milvus-io/milvus/internal/querycoordv2/params"
	"github.com/milvus-io/milvus/internal/types"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/merr"
	"github.com/milvus-io/milvus/pkg/util/retry"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
)
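
// executeFunc is the callback invoked once a query node has been selected:
// it runs the actual request against that node for the given channel(s).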
type executeFunc func(context.Context, UniqueID, types.QueryNode, ...string) error
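
// ChannelWorkload describes a workload bound to a single DML channel: the
// candidate shard leaders, the query count (nq), and the exec callback
// together with its retry budget.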
type ChannelWorkload struct {
	db             string
	collectionName string
	collectionID   int64
	channel        string
	shardLeaders   []int64
	nq             int64
	exec           executeFunc
	retryTimes     uint
}
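
// CollectionWorkLoad describes a workload that covers all DML channels of a
// collection; Execute splits it into per-channel ChannelWorkloads.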
type CollectionWorkLoad struct {
	db             string
	collectionName string
	collectionID   int64
	nq             int64
	exec           executeFunc
}
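
// LBPolicy selects query nodes for collection and channel workloads and feeds
// execution cost back to the underlying balancer.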
type LBPolicy interface {
	Execute(ctx context.Context, workload CollectionWorkLoad) error
	ExecuteWithRetry(ctx context.Context, workload ChannelWorkload) error
	UpdateCostMetrics(node int64, cost *internalpb.CostAggregation)
	Start(ctx context.Context)
	Close()
}
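
// LBPolicyImpl implements LBPolicy on top of a configurable LBBalancer and the
// shard client manager used to reach query nodes.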
type LBPolicyImpl struct {
	balancer  LBBalancer
	clientMgr shardClientMgr
}
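
// NewLBPolicyImpl builds an LBPolicyImpl, choosing the replica selection
// balancer from the proxy configuration: round_robin, or look_aside by default.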
func NewLBPolicyImpl(clientMgr shardClientMgr) *LBPolicyImpl {
	balancePolicy := params.Params.ProxyCfg.ReplicaSelectionPolicy.GetValue()

	var balancer LBBalancer
	switch balancePolicy {
	case "round_robin":
		log.Info("use round_robin policy on replica selection")
		balancer = NewRoundRobinBalancer()
	default:
		log.Info("use look_aside policy on replica selection")
		balancer = NewLookAsideBalancer(clientMgr)
	}

	return &LBPolicyImpl{
		balancer:  balancer,
		clientMgr: clientMgr,
	}
}
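
// Start starts the underlying balancer.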
func (lb *LBPolicyImpl) Start(ctx context.Context) {
	lb.balancer.Start(ctx)
}

// selectNode tries to select the best node from the available shard leaders.
// If the first selection fails, it refreshes the shard leader cache and retries
// once against the fresh leader list, still excluding nodes that already failed.
func (lb *LBPolicyImpl) selectNode(ctx context.Context, workload ChannelWorkload, excludeNodes typeutil.UniqueSet) (int64, error) {
	log := log.With(
		zap.Int64("collectionID", workload.collectionID),
		zap.String("collectionName", workload.collectionName),
		zap.String("channelName", workload.channel),
	)

	filterAvailableNodes := func(node int64, _ int) bool {
		return !excludeNodes.Contain(node)
	}

	getShardLeaders := func() ([]int64, error) {
		shardLeaders, err := globalMetaCache.GetShards(ctx, false, workload.db, workload.collectionName, workload.collectionID)
		if err != nil {
			return nil, err
		}

		return lo.Map(shardLeaders[workload.channel], func(node nodeInfo, _ int) int64 { return node.nodeID }), nil
	}

	availableNodes := lo.Filter(workload.shardLeaders, filterAvailableNodes)
	targetNode, err := lb.balancer.SelectNode(ctx, availableNodes, workload.nq)
	if err != nil {
		globalMetaCache.DeprecateShardCache(workload.db, workload.collectionName)
		nodes, err := getShardLeaders()
		if err != nil || len(nodes) == 0 {
			log.Warn("failed to get shard delegator",
				zap.Error(err))
			return -1, err
		}

		availableNodes := lo.Filter(nodes, filterAvailableNodes)
		if len(availableNodes) == 0 {
			log.Warn("no available shard delegator found",
				zap.Int64s("nodes", nodes),
				zap.Int64s("excluded", excludeNodes.Collect()))
			return -1, merr.WrapErrServiceUnavailable("no available shard delegator found")
		}

		targetNode, err = lb.balancer.SelectNode(ctx, availableNodes, workload.nq)
		if err != nil {
			log.Warn("failed to select shard",
				zap.Int64s("availableNodes", availableNodes),
				zap.Error(err))
			return -1, err
		}
	}

	return targetNode, nil
}

// ExecuteWithRetry chooses a query node to execute the channel workload and
// retries on other shard leaders if the attempt fails, excluding failed nodes,
// until it succeeds or workload.retryTimes attempts are exhausted.
func (lb *LBPolicyImpl) ExecuteWithRetry(ctx context.Context, workload ChannelWorkload) error {
	excludeNodes := typeutil.NewUniqueSet()
	log := log.Ctx(ctx).With(
		zap.Int64("collectionID", workload.collectionID),
		zap.String("collectionName", workload.collectionName),
		zap.String("channelName", workload.channel),
	)

	err := retry.Do(ctx, func() error {
		targetNode, err := lb.selectNode(ctx, workload, excludeNodes)
		if err != nil {
			log.Warn("failed to select node for shard",
				zap.Int64("nodeID", targetNode),
				zap.Error(err))
			return err
		}

		client, err := lb.clientMgr.GetClient(ctx, targetNode)
		if err != nil {
			log.Warn("query channel failed, node not available",
				zap.Int64("nodeID", targetNode),
				zap.Error(err))
			excludeNodes.Insert(targetNode)

			// cancel the workload assigned to the target node
			lb.balancer.CancelWorkload(targetNode, workload.nq)
			return merr.WrapErrShardDelegatorAccessFailed(workload.channel, err.Error())
		}

		err = workload.exec(ctx, targetNode, client, workload.channel)
		if err != nil {
			log.Warn("query channel failed",
				zap.Int64("nodeID", targetNode),
				zap.Error(err))
			excludeNodes.Insert(targetNode)
			lb.balancer.CancelWorkload(targetNode, workload.nq)
			return merr.WrapErrShardDelegatorAccessFailed(workload.channel, err.Error())
		}

		lb.balancer.CancelWorkload(targetNode, workload.nq)
		return nil
	}, retry.Attempts(workload.retryTimes))

	return err
}

// Execute fans the collection workload out to all DML channels and runs the
// per-channel workloads in parallel, returning the first error encountered.
func (lb *LBPolicyImpl) Execute(ctx context.Context, workload CollectionWorkLoad) error {
	dml2leaders, err := globalMetaCache.GetShards(ctx, true, workload.db, workload.collectionName, workload.collectionID)
	if err != nil {
		log.Ctx(ctx).Warn("failed to get shards", zap.Error(err))
		return err
	}

	wg, ctx := errgroup.WithContext(ctx)
	for channel, nodes := range dml2leaders {
		channel := channel
		nodes := lo.Map(nodes, func(node nodeInfo, _ int) int64 { return node.nodeID })
		wg.Go(func() error {
			err := lb.ExecuteWithRetry(ctx, ChannelWorkload{
				db:             workload.db,
				collectionName: workload.collectionName,
				collectionID:   workload.collectionID,
				channel:        channel,
				shardLeaders:   nodes,
				nq:             workload.nq,
				exec:           workload.exec,
				retryTimes:     uint(len(nodes)),
			})
			return err
		})
	}

	err = wg.Wait()
	return err
}
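
// UpdateCostMetrics forwards the cost metrics reported by a query node to the
// balancer so that subsequent node selection can take node load into account.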
func (lb *LBPolicyImpl) UpdateCostMetrics(node int64, cost *internalpb.CostAggregation) {
	lb.balancer.UpdateCostMetrics(node, cost)
}
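
// Close closes the underlying balancer.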
func (lb *LBPolicyImpl) Close() {
	lb.balancer.Close()
}