// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datacoord

import (
	"sort"
	"strconv"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/milvus-io/milvus/internal/kv"
	"github.com/milvus-io/milvus/internal/log"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"go.uber.org/zap"
	"stathat.com/c/consistent"
)

// RegisterPolicy decides the channel mapping after registering the node with nodeID.
type RegisterPolicy func(store ROChannelStore, nodeID int64) ChannelOpSet

// EmptyRegister does nothing.
func EmptyRegister(store ROChannelStore, nodeID int64) ChannelOpSet {
	return nil
}

// BufferChannelAssignPolicy assigns buffer channels to a newly registered node.
func BufferChannelAssignPolicy(store ROChannelStore, nodeID int64) ChannelOpSet {
	info := store.GetBufferChannelInfo()
	if info == nil || len(info.Channels) == 0 {
		return nil
	}

	opSet := ChannelOpSet{}
	opSet.Delete(info.NodeID, info.Channels)
	opSet.Add(nodeID, info.Channels)
	return opSet
}
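
// Illustrative note (not from the original source, hypothetical channel names):
// buffer channels are the ones parked under the special bufferID when no DataNode
// was available to own them. If such channels exist when a node registers,
// BufferChannelAssignPolicy simply moves all of them to the new node, e.g. a
// buffer holding [ch1, ch2] yields Delete(bufferID, [ch1, ch2]) plus
// Add(newNodeID, [ch1, ch2]).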

// AvgAssignRegisterPolicy assigns channels evenly to a newly registered node.
// Register will not directly delete the node-channel pair. Channel manager will handle channel release.
func AvgAssignRegisterPolicy(store ROChannelStore, nodeID int64) ChannelOpSet {
	opSet := BufferChannelAssignPolicy(store, nodeID)
	if len(opSet) != 0 {
		return opSet
	}

	// Get a list of available node-channel info.
	avaNodes := filterNode(store.GetNodesChannels(), nodeID)

	channelNum := 0
	for _, info := range avaNodes {
		channelNum += len(info.Channels)
	}
	// The store has already added the new node.
	chPerNode := channelNum / len(store.GetNodes())
	if chPerNode == 0 {
		return nil
	}

	// Sort in descending order and reallocate.
	sort.Slice(avaNodes, func(i, j int) bool {
		return len(avaNodes[i].Channels) > len(avaNodes[j].Channels)
	})

	releases := make(map[int64][]*channel)
	for i := 0; i < chPerNode; i++ {
		// Pick a node with its channel to release.
		toRelease := avaNodes[i%len(avaNodes)]
		// Pick a channel that will be reassigned to the new node later.
		chIdx := i / len(avaNodes)
		if chIdx >= len(toRelease.Channels) {
			// Node has too few channels, simply skip. No re-picking.
			// TODO: Consider re-picking in case assignment is extremely uneven?
			continue
		}
		releases[toRelease.NodeID] = append(releases[toRelease.NodeID], toRelease.Channels[chIdx])
	}

	opSet = ChannelOpSet{}
	// Channels in `releases` are reassigned eventually by channel manager.
	for k, v := range releases {
		opSet.Add(k, v)
	}
	return opSet
}
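
// Worked example (illustrative only, hypothetical node and channel names): suppose
// the store holds node 1 with [ch1, ch2, ch3, ch4] and node 2 with [ch5, ch6], and
// node 3 has just registered, so store.GetNodes() reports three nodes. Then
// channelNum = 6 and chPerNode = 6/3 = 2; the loop picks ch1 from node 1 and ch5
// from node 2 into `releases`, and the returned ChannelOpSet records them as Add
// ops on their current owners so the channel manager can release and reassign them
// later.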

// filterNode filters out node-channel info where node ID == `nodeID`.
func filterNode(infos []*NodeChannelInfo, nodeID int64) []*NodeChannelInfo {
	filtered := make([]*NodeChannelInfo, 0)
	for _, info := range infos {
		if info.NodeID == nodeID {
			continue
		}
		filtered = append(filtered, info)
	}
	return filtered
}

// ConsistentHashRegisterPolicy uses consistent hashing to maintain the mapping.
func ConsistentHashRegisterPolicy(hashRing *consistent.Consistent) RegisterPolicy {
	return func(store ROChannelStore, nodeID int64) ChannelOpSet {
		elems := formatNodeIDs(store.GetNodes())
		hashRing.Set(elems)

		releases := make(map[int64][]*channel)

		// If there are buffer channels, then nodeID is the first node.
		opSet := BufferChannelAssignPolicy(store, nodeID)
		if len(opSet) != 0 {
			return opSet
		}

		opSet = ChannelOpSet{}
		// If there are other nodes, channels on these nodes may be reassigned to
		// the newly registered node. We should find these channels.
		channelsInfo := store.GetNodesChannels()
		for _, c := range channelsInfo {
			for _, ch := range c.Channels {
				idStr, err := hashRing.Get(ch.Name)
				if err != nil {
					log.Warn("receive error when getting from hashRing",
						zap.String("channel", ch.Name), zap.Error(err))
					return nil
				}
				did, err := deformatNodeID(idStr)
				if err != nil {
					log.Warn("failed to deformat node id", zap.String("nodeID", idStr))
					return nil
				}
				if did != c.NodeID {
					releases[c.NodeID] = append(releases[c.NodeID], ch)
				}
			}
		}

		// Channels in `releases` are reassigned eventually by channel manager.
		for id, channels := range releases {
			opSet.Add(id, channels)
		}
		return opSet
	}
}
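
// Illustrative note (hypothetical values, not from the original source): with the
// ring previously containing nodes "1" and "2", adding a new node "3" via
// hashRing.Set means a channel such as "dml_ch_7" may now hash to "3" instead of
// its current owner "1"; every such channel is collected under its current owner
// in `releases`, and the channel manager later releases and reassigns it to the
// node the ring points to.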

// formatNodeID converts an int64 node ID into its decimal string form for the hash ring.
func formatNodeID(nodeID int64) string {
	return strconv.FormatInt(nodeID, 10)
}

// deformatNodeID parses a node ID string produced by formatNodeID back into an int64.
func deformatNodeID(node string) (int64, error) {
	return strconv.ParseInt(node, 10, 64)
}

// ChannelAssignPolicy assigns channels to registered nodes.
type ChannelAssignPolicy func(store ROChannelStore, channels []*channel) ChannelOpSet

// AverageAssignPolicy ensures that the number of channels per node is approximately the same.
func AverageAssignPolicy(store ROChannelStore, channels []*channel) ChannelOpSet {
	newChannels := filterChannels(store, channels)
	if len(newChannels) == 0 {
		return nil
	}

	opSet := ChannelOpSet{}
	allDataNodes := store.GetNodesChannels()

	// If no DataNode is alive, save the channels in the buffer.
	if len(allDataNodes) == 0 {
		opSet.Add(bufferID, channels)
		return opSet
	}

	// Sort by ascending channel count and assign round-robin.
	sort.Slice(allDataNodes, func(i, j int) bool {
		return len(allDataNodes[i].Channels) <= len(allDataNodes[j].Channels)
	})

	updates := make(map[int64][]*channel)
	for i, newChannel := range newChannels {
		n := allDataNodes[i%len(allDataNodes)].NodeID
		updates[n] = append(updates[n], newChannel)
	}

	for id, chs := range updates {
		opSet.Add(id, chs)
	}
	return opSet
}
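
// Worked example (illustrative, hypothetical channels and call site): assigning
// [chA, chB, chC] when node 1 already holds two channels and node 2 holds none
// sorts the nodes as [2, 1] and deals the new channels round-robin, so node 2
// receives chA and chC while node 1 receives chB:
//
//	// hypothetical call site; `store` is any ROChannelStore implementation
//	opSet := AverageAssignPolicy(store, []*channel{chA, chB, chC})
//
// Channels already recorded in the store are filtered out by filterChannels first.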

// ConsistentHashChannelAssignPolicy uses a consistent hash algorithm to determine channel assignment.
func ConsistentHashChannelAssignPolicy(hashRing *consistent.Consistent) ChannelAssignPolicy {
	return func(store ROChannelStore, channels []*channel) ChannelOpSet {
		hashRing.Set(formatNodeIDs(store.GetNodes()))

		filteredChannels := filterChannels(store, channels)
		if len(filteredChannels) == 0 {
			return nil
		}

		if len(hashRing.Members()) == 0 {
			opSet := ChannelOpSet{}
			opSet.Add(bufferID, channels)
			return opSet
		}

		adds := make(map[int64][]*channel)
		for _, c := range filteredChannels {
			idStr, err := hashRing.Get(c.Name)
			if err != nil {
				log.Warn("receive error when getting from hashRing",
					zap.String("channel", c.Name), zap.Error(err))
				return nil
			}
			did, err := deformatNodeID(idStr)
			if err != nil {
				log.Warn("failed to deformat node id", zap.String("nodeID", idStr))
				return nil
			}
			adds[did] = append(adds[did], c)
		}

		if len(adds) == 0 {
			return nil
		}

		opSet := ChannelOpSet{}
		for id, chs := range adds {
			opSet.Add(id, chs)
		}
		return opSet
	}
}
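
// Illustrative note (hypothetical values): unlike AverageAssignPolicy, the target
// node for each new channel is fully determined by where its name hashes on the
// ring, e.g. "dml_ch_3" always maps to the same member ID while the member set is
// unchanged, which keeps assignments stable across repeated calls.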

// filterChannels drops channels that are already recorded in the store and returns only the new ones.
func filterChannels(store ROChannelStore, channels []*channel) []*channel {
	channelsMap := make(map[string]*channel)
	for _, c := range channels {
		channelsMap[c.Name] = c
	}

	allChannelsInfo := store.GetChannels()
	for _, info := range allChannelsInfo {
		for _, c := range info.Channels {
			delete(channelsMap, c.Name)
		}
	}

	if len(channelsMap) == 0 {
		return nil
	}

	filtered := make([]*channel, 0, len(channelsMap))
	for _, v := range channelsMap {
		filtered = append(filtered, v)
	}
	return filtered
}

// DeregisterPolicy determines the channel mapping after deregistering the node with nodeID.
type DeregisterPolicy func(store ROChannelStore, nodeID int64) ChannelOpSet

// EmptyDeregisterPolicy does nothing.
func EmptyDeregisterPolicy(store ROChannelStore, nodeID int64) ChannelOpSet {
	return nil
}

// AvgAssignUnregisteredChannels evenly assigns the channels of a deregistering node to the remaining nodes.
func AvgAssignUnregisteredChannels(store ROChannelStore, nodeID int64) ChannelOpSet {
	allNodes := store.GetNodesChannels()
	avaNodes := make([]*NodeChannelInfo, 0, len(allNodes))
	unregisteredChannels := make([]*channel, 0)
	opSet := ChannelOpSet{}

	for _, c := range allNodes {
		if c.NodeID == nodeID {
			opSet.Delete(nodeID, c.Channels)
			unregisteredChannels = append(unregisteredChannels, c.Channels...)
			continue
		}
		avaNodes = append(avaNodes, c)
	}

	if len(avaNodes) == 0 {
		opSet.Add(bufferID, unregisteredChannels)
		return opSet
	}

	// Sort by ascending channel count and assign round-robin.
	sort.Slice(avaNodes, func(i, j int) bool {
		return len(avaNodes[i].Channels) <= len(avaNodes[j].Channels)
	})

	updates := make(map[int64][]*channel)
	for i, unregisteredChannel := range unregisteredChannels {
		n := avaNodes[i%len(avaNodes)].NodeID
		updates[n] = append(updates[n], unregisteredChannel)
	}

	for id, chs := range updates {
		opSet.Add(id, chs)
	}
	return opSet
}
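
// Worked example (illustrative, hypothetical IDs): if node 3 is deregistering while
// holding [ch7, ch8], and nodes 1 and 2 hold one channel each, the op set deletes
// both channels from node 3 and, after sorting the survivors by load, adds ch7 to
// one remaining node and ch8 to the other. With no survivors at all, both channels
// would instead be parked under bufferID.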

// ConsistentHashDeregisterPolicy returns a DeregisterPolicy that uses consistent hashing.
func ConsistentHashDeregisterPolicy(hashRing *consistent.Consistent) DeregisterPolicy {
	return func(store ROChannelStore, nodeID int64) ChannelOpSet {
		hashRing.Set(formatNodeIDsWithFilter(store.GetNodes(), nodeID))
		channels := store.GetNodesChannels()
		opSet := ChannelOpSet{}
		var deletedInfo *NodeChannelInfo

		for _, cinfo := range channels {
			if cinfo.NodeID == nodeID {
				deletedInfo = cinfo
				break
			}
		}
		if deletedInfo == nil {
			log.Warn("failed to find node when applying deregister policy", zap.Int64("nodeID", nodeID))
			return nil
		}

		opSet.Delete(nodeID, deletedInfo.Channels)

		// If there are no members in the hash ring, store the channels in the buffer.
		if len(hashRing.Members()) == 0 {
			opSet.Add(bufferID, deletedInfo.Channels)
			return opSet
		}

		// Reassign channels of the deleted node.
		updates := make(map[int64][]*channel)
		for _, c := range deletedInfo.Channels {
			idStr, err := hashRing.Get(c.Name)
			if err != nil {
				log.Warn("failed to get channel in hash ring", zap.String("channel", c.Name))
				return nil
			}

			did, err := deformatNodeID(idStr)
			if err != nil {
				log.Warn("failed to deformat id", zap.String("id", idStr))
				return nil
			}

			updates[did] = append(updates[did], c)
		}

		for id, chs := range updates {
			opSet.Add(id, chs)
		}
		return opSet
	}
}
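
// Illustrative note (hypothetical IDs): when node 2 deregisters, its ID is filtered
// out of the ring before hashing, so every channel it owned (say ch4 and ch5) is
// looked up against the surviving members only; the resulting op set deletes them
// from node 2 and adds each one to whichever surviving node its name hashes to.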

// ChannelReassignPolicy is a policy for reassigning channels
type ChannelReassignPolicy func(store ROChannelStore, reassigns []*NodeChannelInfo) ChannelOpSet

// EmptyReassignPolicy is a dummy reassign policy
func EmptyReassignPolicy(store ROChannelStore, reassigns []*NodeChannelInfo) ChannelOpSet {
	return nil
}

// AverageReassignPolicy is a reassign policy that evenly assigns channels.
func AverageReassignPolicy(store ROChannelStore, reassigns []*NodeChannelInfo) ChannelOpSet {
	allNodes := store.GetNodesChannels()
	filterMap := make(map[int64]struct{})
	for _, reassign := range reassigns {
		filterMap[reassign.NodeID] = struct{}{}
	}
	avaNodes := make([]*NodeChannelInfo, 0, len(allNodes))
	for _, c := range allNodes {
		if _, ok := filterMap[c.NodeID]; ok {
			continue
		}
		avaNodes = append(avaNodes, c)
	}

	if len(avaNodes) == 0 {
		// If no node is left, do not reassign.
		return nil
	}
	sort.Slice(avaNodes, func(i, j int) bool {
		return len(avaNodes[i].Channels) <= len(avaNodes[j].Channels)
	})

	// Reassign channels to the remaining nodes.
	i := 0
	ret := make([]*ChannelOp, 0)
	addUpdates := make(map[int64]*ChannelOp)
	for _, reassign := range reassigns {
		deleteUpdate := &ChannelOp{
			Type:     Delete,
			Channels: reassign.Channels,
			NodeID:   reassign.NodeID,
		}
		ret = append(ret, deleteUpdate)
		for _, ch := range reassign.Channels {
			targetID := avaNodes[i%len(avaNodes)].NodeID
			i++
			if _, ok := addUpdates[targetID]; !ok {
				addUpdates[targetID] = &ChannelOp{
					Type:     Add,
					NodeID:   targetID,
					Channels: []*channel{ch},
				}
			} else {
				addUpdates[targetID].Channels = append(addUpdates[targetID].Channels, ch)
			}
		}
	}
	for _, update := range addUpdates {
		ret = append(ret, update)
	}
	return ret
}
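
// Worked example (illustrative, hypothetical IDs): reassigning node 4's channels
// [ch1, ch2] while nodes 1 and 2 remain produces one Delete op for node 4 plus Add
// ops dealt round-robin over the survivors sorted by load, e.g. ch1 to node 2 and
// ch2 to node 1. Note that this policy builds the result directly as []*ChannelOp
// rather than through ChannelOpSet's Add/Delete helpers.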

// ChannelBGChecker checks nodes' channels and returns the channels that need to be reallocated.
type ChannelBGChecker func(channels []*NodeChannelInfo, ts time.Time) ([]*NodeChannelInfo, error)

// EmptyBgChecker does nothing.
func EmptyBgChecker(channels []*NodeChannelInfo, ts time.Time) ([]*NodeChannelInfo, error) {
	return nil, nil
}

// BgCheckWithMaxWatchDuration returns a ChannelBGChecker that reallocates channels
// whose watch has not succeeded within `Params.DataCoordCfg.WatchTimeoutInterval`.
func BgCheckWithMaxWatchDuration(kv kv.TxnKV) ChannelBGChecker {
	return func(channels []*NodeChannelInfo, ts time.Time) ([]*NodeChannelInfo, error) {
		reAllocations := make([]*NodeChannelInfo, 0, len(channels))
		for _, ch := range channels {
			cinfo := &NodeChannelInfo{
				NodeID:   ch.NodeID,
				Channels: make([]*channel, 0),
			}
			for _, c := range ch.Channels {
				k := buildNodeChannelKey(ch.NodeID, c.Name)
				v, err := kv.Load(k)
				if err != nil {
					return nil, err
				}
				watchInfo := &datapb.ChannelWatchInfo{}
				if err := proto.Unmarshal([]byte(v), watchInfo); err != nil {
					return nil, err
				}
				reviseVChannelInfo(watchInfo.GetVchan())
				// If a channel is not watched or its watch progress is not updated within
				// WatchTimeoutInterval, then we reallocate it to another node.
				if watchInfo.State == datapb.ChannelWatchState_Complete || watchInfo.State == datapb.ChannelWatchState_WatchSuccess {
					continue
				}
				startTime := time.Unix(watchInfo.StartTs, 0)
				d := ts.Sub(startTime)
				if d >= Params.DataCoordCfg.WatchTimeoutInterval.GetAsDuration(time.Second) {
					cinfo.Channels = append(cinfo.Channels, c)
				}
			}
			if len(cinfo.Channels) != 0 {
				reAllocations = append(reAllocations, cinfo)
			}
		}
		return reAllocations, nil
	}
}
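
// Illustrative note (hypothetical configuration): with WatchTimeoutInterval set to
// 300s, a channel whose persisted ChannelWatchInfo is still not Complete/WatchSuccess
// and whose StartTs is at least 300 seconds older than `ts` is collected into the
// returned []*NodeChannelInfo, grouped by the node currently holding it, so the
// caller can reassign it elsewhere.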

// formatNodeIDs converts node IDs into their string form for the hash ring.
func formatNodeIDs(ids []int64) []string {
	formatted := make([]string, 0, len(ids))
	for _, id := range ids {
		formatted = append(formatted, formatNodeID(id))
	}
	return formatted
}

// formatNodeIDsWithFilter is like formatNodeIDs but skips the node ID equal to `filter`.
func formatNodeIDsWithFilter(ids []int64, filter int64) []string {
	formatted := make([]string, 0, len(ids))
	for _, id := range ids {
		if id == filter {
			continue
		}
		formatted = append(formatted, formatNodeID(id))
	}
	return formatted
}