// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.

package datanode

import (
	"context"
	"fmt"
	"path"
	"sort"
	"strconv"
	"sync"

	"github.com/golang/protobuf/proto"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/internal/kv"
	miniokv "github.com/milvus-io/milvus/internal/kv/minio"
	"github.com/milvus-io/milvus/internal/log"
	"github.com/milvus-io/milvus/internal/msgstream"
	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
	"github.com/milvus-io/milvus/internal/util/trace"

	"github.com/opentracing/opentracing-go"
)
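
// ddNode is the flow graph node that handles data definition (DDL) messages:
// CreateCollection/DropCollection and CreatePartition/DropPartition. It records
// the DDL requests in ddBuffer and flushes them as binlogs to MinIO/S3 when the
// buffer is full or a flush message arrives.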
type ddNode struct {
	BaseNode
	ddMsg     *ddMsg
	ddRecords *ddRecords
	ddBuffer  *ddBuffer
	flushMap  *sync.Map
	inFlushCh <-chan *flushMsg

	kv         kv.BaseKV
	replica    Replica
	binlogMeta *binlogMeta
}
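
// ddData buffers the DDL requests of one collection together with their
// timestamps and event types, ready to be serialized into binlogs.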
type ddData struct {
	ddRequestString []string
	timestamps      []Timestamp
	eventTypes      []storage.EventTypeCode
}
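
// ddBuffer holds the per-collection ddData and is flushed once the total
// number of buffered DDL requests reaches maxSize.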
type ddBuffer struct {
	ddData  map[UniqueID]*ddData // collection ID
	maxSize int32
}
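
// ddRecords tracks the collections and partitions this node has seen, so that
// duplicated create/drop operations can be detected and skipped.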
type ddRecords struct {
	collectionRecords map[UniqueID]interface{}
	partitionRecords  map[UniqueID]interface{}
}
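
// size returns the total number of DDL requests currently buffered across all collections.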
func (d *ddBuffer) size() int32 {
	if d.ddData == nil || len(d.ddData) <= 0 {
		return 0
	}

	var size int32 = 0
	for _, data := range d.ddData {
		size += int32(len(data.ddRequestString))
	}
	return size
}
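
// full reports whether the buffer has reached maxSize and should be flushed.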
func (d *ddBuffer) full() bool {
	return d.size() >= d.maxSize
}
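
// Name returns the name of this node in the flow graph.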
func (ddNode *ddNode) Name() string {
	return "ddNode"
}
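
// Operate consumes one MsgStreamMsg, applies the contained DDL messages in
// timestamp order, flushes the DDL buffer when it is full or a flush message is
// received, and forwards a ddMsg to the downstream node.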
func (ddNode *ddNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {

	if len(in) != 1 {
		log.Error("Invalid operate message input in ddNode", zap.Int("input length", len(in)))
		// TODO: add error handling
	}

	msMsg, ok := in[0].(*MsgStreamMsg)
	if !ok {
		log.Error("type assertion failed for MsgStreamMsg")
		// TODO: add error handling
	}

	if msMsg == nil {
		return []Msg{}
	}
	var spans []opentracing.Span
	for _, msg := range msMsg.TsMessages() {
		sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
		spans = append(spans, sp)
		msg.SetTraceCtx(ctx)
	}

	ddNode.ddMsg = &ddMsg{
		collectionRecords: make(map[UniqueID][]*metaOperateRecord),
		partitionRecords:  make(map[UniqueID][]*metaOperateRecord),
		timeRange: TimeRange{
			timestampMin: msMsg.TimestampMin(),
			timestampMax: msMsg.TimestampMax(),
		},
		flushMessages: make([]*flushMsg, 0),
		gcRecord: &gcRecord{
			collections: make([]UniqueID, 0),
		},
	}

	// sort tsMessages by begin timestamp
	tsMessages := msMsg.TsMessages()
	sort.Slice(tsMessages,
		func(i, j int) bool {
			return tsMessages[i].BeginTs() < tsMessages[j].BeginTs()
		})

	// do dd tasks
	for _, msg := range tsMessages {
		switch msg.Type() {
		case commonpb.MsgType_CreateCollection:
			ddNode.createCollection(msg.(*msgstream.CreateCollectionMsg))
		case commonpb.MsgType_DropCollection:
			ddNode.dropCollection(msg.(*msgstream.DropCollectionMsg))
		case commonpb.MsgType_CreatePartition:
			ddNode.createPartition(msg.(*msgstream.CreatePartitionMsg))
		case commonpb.MsgType_DropPartition:
			ddNode.dropPartition(msg.(*msgstream.DropPartitionMsg))
		default:
			log.Error("Unsupported message type", zap.Any("Type", msg.Type()))
		}
	}

	// generate binlog
	if ddNode.ddBuffer.full() {
		for k, v := range ddNode.ddBuffer.ddData {
			ddNode.flushMap.Store(k, v)
		}
		ddNode.ddBuffer.ddData = make(map[UniqueID]*ddData)
		log.Debug(". dd buffer full, auto flushing ...")
		go flushTxn(ddNode.flushMap, ddNode.kv, ddNode.binlogMeta)
	}

	select {
	case fmsg := <-ddNode.inFlushCh:
		log.Debug(". receive flush message ...")
		localSegs := make([]UniqueID, 0, len(fmsg.segmentIDs))
		for _, segID := range fmsg.segmentIDs {
			if ddNode.replica.hasSegment(segID) {
				localSegs = append(localSegs, segID)
			}
		}

		if len(localSegs) <= 0 {
			log.Debug(".. Segments do not exist in this datanode, skip flushing ...")
			break
		}

		log.Debug(".. Segments exist, notifying insertbuffer ...")
		fmsg.segmentIDs = localSegs
		ddNode.ddMsg.flushMessages = append(ddNode.ddMsg.flushMessages, fmsg)

		if ddNode.ddBuffer.size() > 0 {
			log.Debug(".. ddl buffer not empty, flushing ...")
			for k, v := range ddNode.ddBuffer.ddData {
				ddNode.flushMap.Store(k, v)
			}
			ddNode.ddBuffer.ddData = make(map[UniqueID]*ddData)

			go flushTxn(ddNode.flushMap, ddNode.kv, ddNode.binlogMeta)
		}

	default:
	}

	for _, span := range spans {
		span.Finish()
	}

	var res Msg = ddNode.ddMsg
	return []Msg{res}
}

/*
flushTxn() will do the following:
	generate binlogs for all buffered data in ddNode,
	store the generated binlogs to MinIO/S3,
	store the keys (paths in MinIO/S3) of the binlogs to etcd.

The keys of the binlogs are generated as below:
	${tenant}/data_definition_log/${collection_id}/ts/${log_idx}
	${tenant}/data_definition_log/${collection_id}/ddl/${log_idx}
*/
func flushTxn(ddlData *sync.Map,
	kv kv.BaseKV,
	meta *binlogMeta) {
	// generate binlog
	ddCodec := &storage.DataDefinitionCodec{}
	ddlData.Range(func(cID, d interface{}) bool {

		data := d.(*ddData)
		collID := cID.(int64)
		log.Debug(".. ddl flushing ...", zap.Int64("collectionID", collID), zap.Int("length", len(data.ddRequestString)))
		binLogs, err := ddCodec.Serialize(data.timestamps, data.ddRequestString, data.eventTypes)
		if err != nil || len(binLogs) != 2 {
			log.Error("Codec Serialize wrong", zap.Error(err))
			return false
		}

		if len(data.ddRequestString) != len(data.timestamps) ||
			len(data.timestamps) != len(data.eventTypes) {
			log.Error("illegal ddBuffer, failed to save binlog")
			return false
		}

		kvs := make(map[string]string, 2)
		tsIdx, err := meta.genKey(true)
		if err != nil {
			log.Error("Id allocate wrong", zap.Error(err))
			return false
		}
		tsKey := path.Join(Params.DdlBinlogRootPath, strconv.FormatInt(collID, 10), binLogs[0].GetKey(), tsIdx)
		kvs[tsKey] = string(binLogs[0].GetValue())

		ddlIdx, err := meta.genKey(true)
		if err != nil {
			log.Error("Id allocate wrong", zap.Error(err))
			return false
		}
		ddlKey := path.Join(Params.DdlBinlogRootPath, strconv.FormatInt(collID, 10), binLogs[1].GetKey(), ddlIdx)
		kvs[ddlKey] = string(binLogs[1].GetValue())

		// save ddl/ts binlog to minIO/s3
		log.Debug(".. Saving ddl binlog to minIO/s3 ...")
		err = kv.MultiSave(kvs)
		if err != nil {
			log.Error("Save to minIO/S3 Wrong", zap.Error(err))
			_ = kv.MultiRemove([]string{tsKey, ddlKey})
			return false
		}

		log.Debug(".. Saving ddl binlog meta ...")
		err = meta.SaveDDLBinlogMetaTxn(collID, tsKey, ddlKey)
		if err != nil {
			log.Error("Save binlog meta to etcd Wrong", zap.Error(err))
			_ = kv.MultiRemove([]string{tsKey, ddlKey})
			return false
		}

		log.Debug(".. Clearing ddl flush buffer ...")
		ddlData.Delete(collID)
		return true
	})
	log.Debug(".. DDL flushing completed ...")
}
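
// createCollection records a CreateCollection message: it registers the collection
// in ddRecords and the replica, and appends the request to the DDL buffer.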
func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
	sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
	msg.SetTraceCtx(ctx)
	defer sp.Finish()

	collectionID := msg.CollectionID

	// add collection
	if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; ok {
		err := fmt.Errorf("collection %d already exists", collectionID)
		log.Error("create collection failed", zap.Error(err))
		return
	}
	ddNode.ddRecords.collectionRecords[collectionID] = nil

	// TODO: add default partition?

	var schema schemapb.CollectionSchema
	err := proto.Unmarshal(msg.Schema, &schema)
	if err != nil {
		log.Error("proto unmarshal wrong", zap.Error(err))
		return
	}

	// add collection to the replica
	err = ddNode.replica.addCollection(collectionID, &schema)
	if err != nil {
		log.Error("replica add collection wrong", zap.Error(err))
		return
	}

	ddNode.ddMsg.collectionRecords[collectionID] = append(ddNode.ddMsg.collectionRecords[collectionID],
		&metaOperateRecord{
			createOrDrop: true,
			timestamp:    msg.Base.Timestamp,
		})

	_, ok := ddNode.ddBuffer.ddData[collectionID]
	if !ok {
		ddNode.ddBuffer.ddData[collectionID] = &ddData{
			ddRequestString: make([]string, 0),
			timestamps:      make([]Timestamp, 0),
			eventTypes:      make([]storage.EventTypeCode, 0),
		}
	}

	ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreateCollectionRequest.String())
	ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
	ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreateCollectionEventType)
}

/*
dropCollection drops the collection in ddRecords but won't drop the collection in replica.
*/
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
	sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
	msg.SetTraceCtx(ctx)
	defer sp.Finish()

	collectionID := msg.CollectionID

	// remove collection
	if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; !ok {
		log.Error("Cannot find collection", zap.Int64("collection ID", collectionID))
		return
	}
	delete(ddNode.ddRecords.collectionRecords, collectionID)

	ddNode.ddMsg.collectionRecords[collectionID] = append(ddNode.ddMsg.collectionRecords[collectionID],
		&metaOperateRecord{
			createOrDrop: false,
			timestamp:    msg.Base.Timestamp,
		})

	_, ok := ddNode.ddBuffer.ddData[collectionID]
	if !ok {
		ddNode.ddBuffer.ddData[collectionID] = &ddData{
			ddRequestString: make([]string, 0),
			timestamps:      make([]Timestamp, 0),
			eventTypes:      make([]storage.EventTypeCode, 0),
		}
	}

	ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropCollectionRequest.String())
	ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
	ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropCollectionEventType)

	ddNode.ddMsg.gcRecord.collections = append(ddNode.ddMsg.gcRecord.collections, collectionID)
}
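
// createPartition records a CreatePartition message in ddRecords and appends the
// request to the DDL buffer of its collection.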
func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
	sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
	msg.SetTraceCtx(ctx)
	defer sp.Finish()

	partitionID := msg.PartitionID
	collectionID := msg.CollectionID

	// add partition
	if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; ok {
		log.Error("partition already exists", zap.Int64("partition ID", partitionID))
		return
	}
	ddNode.ddRecords.partitionRecords[partitionID] = nil

	ddNode.ddMsg.partitionRecords[partitionID] = append(ddNode.ddMsg.partitionRecords[partitionID],
		&metaOperateRecord{
			createOrDrop: true,
			timestamp:    msg.Base.Timestamp,
		})

	_, ok := ddNode.ddBuffer.ddData[collectionID]
	if !ok {
		ddNode.ddBuffer.ddData[collectionID] = &ddData{
			ddRequestString: make([]string, 0),
			timestamps:      make([]Timestamp, 0),
			eventTypes:      make([]storage.EventTypeCode, 0),
		}
	}

	ddNode.ddBuffer.ddData[collectionID].ddRequestString =
		append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreatePartitionRequest.String())

	ddNode.ddBuffer.ddData[collectionID].timestamps =
		append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)

	ddNode.ddBuffer.ddData[collectionID].eventTypes =
		append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreatePartitionEventType)
}
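
// dropPartition removes the partition from ddRecords and appends the drop request
// to the DDL buffer of its collection.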
func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
	sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
	msg.SetTraceCtx(ctx)
	defer sp.Finish()

	partitionID := msg.PartitionID
	collectionID := msg.CollectionID

	// remove partition
	if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; !ok {
		log.Error("cannot find partition", zap.Int64("partition ID", partitionID))
		return
	}
	delete(ddNode.ddRecords.partitionRecords, partitionID)

	// partitionName := msg.PartitionName
	// ddNode.ddMsg.partitionRecords[partitionName] = append(ddNode.ddMsg.partitionRecords[partitionName],
	ddNode.ddMsg.partitionRecords[partitionID] = append(ddNode.ddMsg.partitionRecords[partitionID],
		&metaOperateRecord{
			createOrDrop: false,
			timestamp:    msg.Base.Timestamp,
		})

	_, ok := ddNode.ddBuffer.ddData[collectionID]
	if !ok {
		ddNode.ddBuffer.ddData[collectionID] = &ddData{
			ddRequestString: make([]string, 0),
			timestamps:      make([]Timestamp, 0),
			eventTypes:      make([]storage.EventTypeCode, 0),
		}
	}

	ddNode.ddBuffer.ddData[collectionID].ddRequestString =
		append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropPartitionRequest.String())

	ddNode.ddBuffer.ddData[collectionID].timestamps =
		append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)

	ddNode.ddBuffer.ddData[collectionID].eventTypes =
		append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropPartitionEventType)
}
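
// newDDNode creates a ddNode backed by a MinIO KV store for binlog storage.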
func newDDNode(ctx context.Context, binlogMeta *binlogMeta,
	inFlushCh <-chan *flushMsg, replica Replica) *ddNode {
	maxQueueLength := Params.FlowGraphMaxQueueLength
	maxParallelism := Params.FlowGraphMaxParallelism

	baseNode := BaseNode{}
	baseNode.SetMaxQueueLength(maxQueueLength)
	baseNode.SetMaxParallelism(maxParallelism)

	ddRecords := &ddRecords{
		collectionRecords: make(map[UniqueID]interface{}),
		partitionRecords:  make(map[UniqueID]interface{}),
	}

	bucketName := Params.MinioBucketName
	option := &miniokv.Option{
		Address:           Params.MinioAddress,
		AccessKeyID:       Params.MinioAccessKeyID,
		SecretAccessKeyID: Params.MinioSecretAccessKey,
		UseSSL:            Params.MinioUseSSL,
		BucketName:        bucketName,
		CreateBucket:      true,
	}
	minioKV, err := miniokv.NewMinIOKV(ctx, option)
	if err != nil {
		panic(err)
	}

	return &ddNode{
		BaseNode:  baseNode,
		ddRecords: ddRecords,
		ddBuffer: &ddBuffer{
			ddData:  make(map[UniqueID]*ddData),
			maxSize: Params.FlushDdBufferSize,
		},
		inFlushCh: inFlushCh,

		// idAllocator: alloc,
		kv:         minioKV,
		replica:    replica,
		binlogMeta: binlogMeta,
		flushMap:   &sync.Map{},
	}
}