// Licensed to the LF AI & Data foundation under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rootcoord import ( "context" "errors" "fmt" "math/rand" "os" "sync" "sync/atomic" "syscall" "time" "github.com/samber/lo" "golang.org/x/sync/errgroup" "github.com/milvus-io/milvus-proto/go-api/commonpb" "github.com/milvus-io/milvus-proto/go-api/milvuspb" "github.com/milvus-io/milvus-proto/go-api/schemapb" "github.com/milvus-io/milvus/internal/allocator" "github.com/milvus-io/milvus/internal/common" pnc "github.com/milvus-io/milvus/internal/distributed/proxy/client" "github.com/milvus-io/milvus/internal/kv" etcdkv "github.com/milvus-io/milvus/internal/kv/etcd" "github.com/milvus-io/milvus/internal/log" "github.com/milvus-io/milvus/internal/metastore" "github.com/milvus-io/milvus/internal/metastore/db/dao" "github.com/milvus-io/milvus/internal/metastore/db/dbcore" "github.com/milvus-io/milvus/internal/metastore/db/rootcoord" kvmetestore "github.com/milvus-io/milvus/internal/metastore/kv/rootcoord" "github.com/milvus-io/milvus/internal/metastore/model" "github.com/milvus-io/milvus/internal/metrics" pb "github.com/milvus-io/milvus/internal/proto/etcdpb" "github.com/milvus-io/milvus/internal/proto/internalpb" "github.com/milvus-io/milvus/internal/proto/proxypb" 
"github.com/milvus-io/milvus/internal/proto/rootcoordpb" "github.com/milvus-io/milvus/internal/tso" "github.com/milvus-io/milvus/internal/types" "github.com/milvus-io/milvus/internal/util" "github.com/milvus-io/milvus/internal/util/commonpbutil" "github.com/milvus-io/milvus/internal/util/crypto" "github.com/milvus-io/milvus/internal/util/dependency" "github.com/milvus-io/milvus/internal/util/errorutil" "github.com/milvus-io/milvus/internal/util/funcutil" "github.com/milvus-io/milvus/internal/util/metricsinfo" "github.com/milvus-io/milvus/internal/util/paramtable" "github.com/milvus-io/milvus/internal/util/retry" "github.com/milvus-io/milvus/internal/util/sessionutil" "github.com/milvus-io/milvus/internal/util/timerecord" "github.com/milvus-io/milvus/internal/util/tsoutil" "github.com/milvus-io/milvus/internal/util/typeutil" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" ) // UniqueID is an alias of typeutil.UniqueID. type UniqueID = typeutil.UniqueID // Timestamp is an alias of typeutil.Timestamp type Timestamp = typeutil.Timestamp const InvalidCollectionID = UniqueID(0) var Params *paramtable.ComponentParam = paramtable.Get() type Opt func(*Core) type metaKVCreator func(root string) (kv.MetaKv, error) func defaultMetaKVCreator(etcdCli *clientv3.Client) metaKVCreator { return func(root string) (kv.MetaKv, error) { return etcdkv.NewEtcdKV(etcdCli, root), nil } } // Core root coordinator core type Core struct { ctx context.Context cancel context.CancelFunc wg sync.WaitGroup etcdCli *clientv3.Client address string meta IMetaTable scheduler IScheduler broker Broker ddlTsLockManager DdlTsLockManager garbageCollector GarbageCollector stepExecutor StepExecutor metaKVCreator metaKVCreator proxyCreator proxyCreator proxyManager *proxyManager proxyClientManager *proxyClientManager metricsCacheManager *metricsinfo.MetricsCacheManager chanTimeTick *timetickSync idAllocator allocator.Interface tsoAllocator tso.Allocator dataCoord types.DataCoord queryCoord 
types.QueryCoord indexCoord types.IndexCoord quotaCenter *QuotaCenter stateCode atomic.Value initOnce sync.Once startOnce sync.Once session *sessionutil.Session factory dependency.Factory importManager *importManager enableActiveStandBy bool activateFunc func() } // --------------------- function -------------------------- // NewCore creates a new rootcoord core func NewCore(c context.Context, factory dependency.Factory) (*Core, error) { ctx, cancel := context.WithCancel(c) rand.Seed(time.Now().UnixNano()) core := &Core{ ctx: ctx, cancel: cancel, factory: factory, enableActiveStandBy: Params.RootCoordCfg.EnableActiveStandby.GetAsBool(), } core.UpdateStateCode(commonpb.StateCode_Abnormal) core.proxyCreator = func(se *sessionutil.Session) (types.Proxy, error) { cli, err := pnc.NewClient(c, se.Address) if err != nil { return nil, err } if err := cli.Init(); err != nil { return nil, err } if err := cli.Start(); err != nil { return nil, err } return cli, nil } return core, nil } // UpdateStateCode update state code func (c *Core) UpdateStateCode(code commonpb.StateCode) { c.stateCode.Store(code) log.Info("update rootcoord state", zap.String("state", code.String())) } func (c *Core) checkHealthy() (commonpb.StateCode, bool) { code := c.stateCode.Load().(commonpb.StateCode) ok := code == commonpb.StateCode_Healthy return code, ok } func (c *Core) sendTimeTick(t Timestamp, reason string) error { pc := c.chanTimeTick.listDmlChannels() pt := make([]uint64, len(pc)) for i := 0; i < len(pt); i++ { pt[i] = t } ttMsg := internalpb.ChannelTimeTickMsg{ Base: commonpbutil.NewMsgBase( commonpbutil.WithMsgType(commonpb.MsgType_TimeTick), commonpbutil.WithTimeStamp(t), commonpbutil.WithSourceID(ddlSourceID), ), ChannelNames: pc, Timestamps: pt, DefaultTimestamp: t, } return c.chanTimeTick.updateTimeTick(&ttMsg, reason) } func (c *Core) sendMinDdlTsAsTt() { code := c.stateCode.Load().(commonpb.StateCode) if code != commonpb.StateCode_Healthy { log.Warn("rootCoord is not healthy, skip 
send timetick")
		return
	}
	minBgDdlTs := c.ddlTsLockManager.GetMinDdlTs()
	minNormalDdlTs := c.scheduler.GetMinDdlTs()
	minDdlTs := funcutil.Min(minBgDdlTs, minNormalDdlTs)
	// zero -> ddlTsLockManager and scheduler not started.
	if minDdlTs == typeutil.ZeroTimestamp {
		log.Warn("zero ts was met, this should be only occurred in starting state",
			zap.Uint64("minBgDdlTs", minBgDdlTs), zap.Uint64("minNormalDdlTs", minNormalDdlTs))
		return
	}
	// max -> abnormal case, impossible.
	if minDdlTs == typeutil.MaxTimestamp {
		log.Warn("ddl ts is abnormal, max ts was met",
			zap.Uint64("minBgDdlTs", minBgDdlTs), zap.Uint64("minNormalDdlTs", minNormalDdlTs))
		return
	}
	if err := c.sendTimeTick(minDdlTs, "timetick loop"); err != nil {
		log.Warn("failed to send timetick", zap.Error(err))
	}
}

// startTimeTickLoop periodically forwards the minimal DDL timestamp as a
// time tick until the core context is cancelled. Runs as a goroutine tracked
// by c.wg.
func (c *Core) startTimeTickLoop() {
	defer c.wg.Done()
	ticker := time.NewTicker(Params.ProxyCfg.TimeTickInterval.GetAsDuration(time.Millisecond))
	// Fix: the ticker was never stopped, leaking its runtime timer for the
	// life of the process; tsLoop below already follows the
	// NewTicker/defer-Stop idiom.
	defer ticker.Stop()
	for {
		select {
		case <-c.ctx.Done():
			return
		case <-ticker.C:
			c.sendMinDdlTsAsTt()
		}
	}
}

// tsLoop drives the global TSO allocator: on every tick it persists a fresh
// timestamp window and exports the last saved time as a metric. Runs as a
// goroutine tracked by c.wg.
func (c *Core) tsLoop() {
	defer c.wg.Done()
	tsoTicker := time.NewTicker(tso.UpdateTimestampStep)
	defer tsoTicker.Stop()
	ctx, cancel := context.WithCancel(c.ctx)
	defer cancel()
	for {
		select {
		case <-tsoTicker.C:
			if err := c.tsoAllocator.UpdateTSO(); err != nil {
				log.Warn("failed to update timestamp: ", zap.Error(err))
				continue
			}
			ts := c.tsoAllocator.GetLastSavedTime()
			metrics.RootCoordTimestampSaved.Set(float64(ts.Unix()))
			// Fix: UpdateTSO was invoked a second time here, with a
			// "failed to update id" log message — a copy/paste remnant of an
			// ID-allocator update; one TSO update per tick is sufficient.
		case <-ctx.Done():
			return
		}
	}
}

// SetDataCoord initializes and starts the given DataCoord client, then wires
// it into the core. ctx is accepted for interface compatibility but unused.
func (c *Core) SetDataCoord(ctx context.Context, s types.DataCoord) error {
	if err := s.Init(); err != nil {
		return err
	}
	if err := s.Start(); err != nil {
		return err
	}
	c.dataCoord = s
	return nil
}

// SetIndexCoord initializes and starts the given IndexCoord client, then
// wires it into the core.
func (c *Core) SetIndexCoord(s types.IndexCoord) error {
	if err := s.Init(); err != nil {
		return err
	}
	if err := s.Start(); err != nil {
		return err
	}
	c.indexCoord = s
	return nil
}

// SetQueryCoord initializes and starts the given QueryCoord client, then
// wires it into the core.
func (c *Core) SetQueryCoord(s
types.QueryCoord) error {
	if err := s.Init(); err != nil {
		return err
	}
	if err := s.Start(); err != nil {
		return err
	}
	c.queryCoord = s
	return nil
}

// Register register rootcoord at etcd
func (c *Core) Register() error {
	c.session.Register()
	if c.enableActiveStandBy {
		// Standby path: hand control to the session; the activateFunc
		// installed in startInternal flips the state to Healthy on activation.
		c.session.ProcessActiveStandBy(c.activateFunc)
	} else {
		c.UpdateStateCode(commonpb.StateCode_Healthy)
		log.Debug("RootCoord start successfully ", zap.String("State Code", commonpb.StateCode_Healthy.String()))
	}
	log.Info("RootCoord Register Finished")
	// Liveness watcher: if the etcd lease is lost, stop the server and, when
	// TriggerKill is set, forward SIGINT to this process so the starter exits.
	go c.session.LivenessCheck(c.ctx, func() {
		log.Error("Root Coord disconnected from etcd, process will exit", zap.Int64("Server Id", c.session.ServerID))
		if err := c.Stop(); err != nil {
			log.Fatal("failed to stop server", zap.Error(err))
		}
		// manually send signal to starter goroutine
		if c.session.TriggerKill {
			if p, err := os.FindProcess(os.Getpid()); err == nil {
				p.Signal(syscall.SIGINT)
			}
		}
	})
	// NOTE(review): this sets Healthy unconditionally, including on the
	// standby path above — presumably ProcessActiveStandBy blocks until this
	// node becomes active; confirm, otherwise a standby node would report
	// Healthy prematurely.
	c.UpdateStateCode(commonpb.StateCode_Healthy)
	return nil
}

// SetAddress records the address this coordinator serves on.
func (c *Core) SetAddress(address string) {
	c.address = address
}

// SetEtcdClient sets the etcdCli of Core
func (c *Core) SetEtcdClient(etcdClient *clientv3.Client) {
	c.etcdCli = etcdClient
}

// initSession creates and initializes the etcd-backed session used for
// service registration and liveness checking.
func (c *Core) initSession() error {
	c.session = sessionutil.NewSession(c.ctx, Params.EtcdCfg.MetaRootPath.GetValue(), c.etcdCli)
	if c.session == nil {
		return fmt.Errorf("session is nil, the etcd client connection may have failed")
	}
	c.session.Init(typeutil.RootCoordRole, c.address, true, true)
	c.session.SetEnableActiveStandBy(c.enableActiveStandBy)
	return nil
}

// initKVCreator installs the default etcd-based meta-KV factory unless a
// custom creator was injected beforehand (e.g. by tests).
func (c *Core) initKVCreator() {
	if c.metaKVCreator == nil {
		c.metaKVCreator = defaultMetaKVCreator(c.etcdCli)
	}
}

// initMetaTable builds the metastore catalog (etcd- or mysql-backed, per
// config) and the in-memory meta table on top of it, retrying up to 10 times.
func (c *Core) initMetaTable() error {
	fn := func() error {
		var catalog metastore.RootCoordCatalog
		var err error
		switch Params.MetaStoreCfg.MetaStoreType.GetValue() {
		case util.MetaStoreTypeEtcd:
			var metaKV kv.MetaKv
			var ss *kvmetestore.SuffixSnapshot
			var err error
			if metaKV, err = c.metaKVCreator(Params.EtcdCfg.MetaRootPath.GetValue()); err !=
nil { return err } if ss, err = kvmetestore.NewSuffixSnapshot(metaKV, kvmetestore.SnapshotsSep, Params.EtcdCfg.MetaRootPath.GetValue(), kvmetestore.SnapshotPrefix); err != nil { return err } catalog = &kvmetestore.Catalog{Txn: metaKV, Snapshot: ss} case util.MetaStoreTypeMysql: // connect to database err := dbcore.Connect(&Params.DBCfg) if err != nil { return err } catalog = rootcoord.NewTableCatalog(dbcore.NewTxImpl(), dao.NewMetaDomain()) default: return retry.Unrecoverable(fmt.Errorf("not supported meta store: %s", Params.MetaStoreCfg.MetaStoreType.GetValue())) } if c.meta, err = NewMetaTable(c.ctx, catalog); err != nil { return err } return nil } return retry.Do(c.ctx, fn, retry.Attempts(10)) } func (c *Core) initIDAllocator() error { tsoKV := tsoutil.NewTSOKVBase(c.etcdCli, Params.EtcdCfg.KvRootPath.GetValue(), globalIDAllocatorSubPath) idAllocator := allocator.NewGlobalIDAllocator(globalIDAllocatorKey, tsoKV) if err := idAllocator.Initialize(); err != nil { return err } c.idAllocator = idAllocator return nil } func (c *Core) initTSOAllocator() error { tsoKV := tsoutil.NewTSOKVBase(c.etcdCli, Params.EtcdCfg.KvRootPath.GetValue(), globalTSOAllocatorSubPath) tsoAllocator := tso.NewGlobalTSOAllocator(globalTSOAllocatorKey, tsoKV) if err := tsoAllocator.Initialize(); err != nil { return err } c.tsoAllocator = tsoAllocator return nil } func (c *Core) initImportManager() error { impTaskKv, err := c.metaKVCreator(Params.EtcdCfg.KvRootPath.GetValue()) if err != nil { return err } f := NewImportFactory(c) c.importManager = newImportManager( c.ctx, impTaskKv, f.NewIDAllocator(), f.NewImportFunc(), f.NewMarkSegmentsDroppedFunc(), f.NewGetCollectionNameFunc(), f.NewDescribeIndexFunc(), f.NewGetSegmentIndexStateFunc(), f.NewUnsetIsImportingStateFunc(), ) c.importManager.init(c.ctx) return nil } func (c *Core) initInternal() error { if err := c.initSession(); err != nil { return err } c.initKVCreator() if err := c.initMetaTable(); err != nil { return err } if err := 
c.initIDAllocator(); err != nil { return err } if err := c.initTSOAllocator(); err != nil { return err } c.scheduler = newScheduler(c.ctx, c.idAllocator, c.tsoAllocator) c.factory.Init(Params) chanMap := c.meta.ListCollectionPhysicalChannels() c.chanTimeTick = newTimeTickSync(c.ctx, c.session.ServerID, c.factory, chanMap) c.proxyClientManager = newProxyClientManager(c.proxyCreator) c.broker = newServerBroker(c) c.ddlTsLockManager = newDdlTsLockManager(c.tsoAllocator) c.garbageCollector = newBgGarbageCollector(c) c.stepExecutor = newBgStepExecutor(c.ctx) c.proxyManager = newProxyManager( c.ctx, c.etcdCli, c.chanTimeTick.initSessions, c.proxyClientManager.GetProxyClients, ) c.proxyManager.AddSessionFunc(c.chanTimeTick.addSession, c.proxyClientManager.AddProxyClient) c.proxyManager.DelSessionFunc(c.chanTimeTick.delSession, c.proxyClientManager.DelProxyClient) c.metricsCacheManager = metricsinfo.NewMetricsCacheManager() c.quotaCenter = NewQuotaCenter(c.proxyClientManager, c.queryCoord, c.dataCoord, c.tsoAllocator) log.Debug("RootCoord init QuotaCenter done") if err := c.initImportManager(); err != nil { return err } if err := c.initCredentials(); err != nil { return err } if err := c.initRbac(); err != nil { return err } return nil } // Init initialize routine func (c *Core) Init() error { var initError error c.initOnce.Do(func() { initError = c.initInternal() }) return initError } func (c *Core) initCredentials() error { credInfo, _ := c.meta.GetCredential(util.UserRoot) if credInfo == nil { log.Debug("RootCoord init user root") encryptedRootPassword, _ := crypto.PasswordEncrypt(util.DefaultRootPassword) err := c.meta.AddCredential(&internalpb.CredentialInfo{Username: util.UserRoot, EncryptedPassword: encryptedRootPassword}) return err } return nil } func (c *Core) initRbac() (initError error) { // create default roles, including admin, public for _, role := range util.DefaultRoles { if initError = c.meta.CreateRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: 
role}); initError != nil { if common.IsIgnorableError(initError) { initError = nil continue } return } } // grant privileges for the public role globalPrivileges := []string{ commonpb.ObjectPrivilege_PrivilegeDescribeCollection.String(), commonpb.ObjectPrivilege_PrivilegeShowCollections.String(), } collectionPrivileges := []string{ commonpb.ObjectPrivilege_PrivilegeIndexDetail.String(), } for _, globalPrivilege := range globalPrivileges { if initError = c.meta.OperatePrivilege(util.DefaultTenant, &milvuspb.GrantEntity{ Role: &milvuspb.RoleEntity{Name: util.RolePublic}, Object: &milvuspb.ObjectEntity{Name: commonpb.ObjectType_Global.String()}, ObjectName: util.AnyWord, Grantor: &milvuspb.GrantorEntity{ User: &milvuspb.UserEntity{Name: util.UserRoot}, Privilege: &milvuspb.PrivilegeEntity{Name: globalPrivilege}, }, }, milvuspb.OperatePrivilegeType_Grant); initError != nil { if common.IsIgnorableError(initError) { initError = nil continue } return } } for _, collectionPrivilege := range collectionPrivileges { if initError = c.meta.OperatePrivilege(util.DefaultTenant, &milvuspb.GrantEntity{ Role: &milvuspb.RoleEntity{Name: util.RolePublic}, Object: &milvuspb.ObjectEntity{Name: commonpb.ObjectType_Collection.String()}, ObjectName: util.AnyWord, Grantor: &milvuspb.GrantorEntity{ User: &milvuspb.UserEntity{Name: util.UserRoot}, Privilege: &milvuspb.PrivilegeEntity{Name: collectionPrivilege}, }, }, milvuspb.OperatePrivilegeType_Grant); initError != nil { if common.IsIgnorableError(initError) { initError = nil continue } return } } return nil } func (c *Core) restore(ctx context.Context) error { colls, err := c.meta.ListAbnormalCollections(ctx, typeutil.MaxTimestamp) if err != nil { return err } for _, coll := range colls { ts, err := c.tsoAllocator.GenerateTSO(1) if err != nil { return err } switch coll.State { case pb.CollectionState_CollectionDropping: go c.garbageCollector.ReDropCollection(coll.Clone(), ts) case pb.CollectionState_CollectionCreating: go 
c.garbageCollector.RemoveCreatingCollection(coll.Clone()) default: } } colls, err = c.meta.ListCollections(ctx, typeutil.MaxTimestamp) if err != nil { return err } for _, coll := range colls { for _, part := range coll.Partitions { ts, err := c.tsoAllocator.GenerateTSO(1) if err != nil { return err } switch part.State { case pb.PartitionState_PartitionDropping: go c.garbageCollector.ReDropPartition(coll.PhysicalChannelNames, part.Clone(), ts) default: } } } return nil } func (c *Core) startInternal() error { if err := c.proxyManager.WatchProxy(); err != nil { log.Fatal("rootcoord failed to watch proxy", zap.Error(err)) // you can not just stuck here, panic(err) } if err := c.restore(c.ctx); err != nil { panic(err) } c.wg.Add(6) go c.startTimeTickLoop() go c.tsLoop() go c.chanTimeTick.startWatch(&c.wg) go c.importManager.cleanupLoop(&c.wg) go c.importManager.sendOutTasksLoop(&c.wg) go c.importManager.flipTaskStateLoop(&c.wg) if Params.QuotaConfig.QuotaAndLimitsEnabled.GetAsBool() { go c.quotaCenter.run() } c.scheduler.Start() c.stepExecutor.Start() if c.enableActiveStandBy { c.activateFunc = func() { // todo to complete log.Info("rootcoord switch from standby to active, activating") c.UpdateStateCode(commonpb.StateCode_Healthy) } c.UpdateStateCode(commonpb.StateCode_StandBy) } return nil } // Start starts RootCoord. func (c *Core) Start() error { var err error c.startOnce.Do(func() { err = c.startInternal() }) return err } func (c *Core) stopExecutor() { if c.stepExecutor != nil { c.stepExecutor.Stop() log.Info("stop rootcoord executor") } } func (c *Core) stopScheduler() { if c.scheduler != nil { c.scheduler.Stop() log.Info("stop rootcoord scheduler") } } func (c *Core) cancelIfNotNil() { if c.cancel != nil { c.cancel() log.Info("cancel rootcoord goroutines") } } func (c *Core) revokeSession() { if c.session != nil { // wait at most one second to revoke c.session.Revoke(time.Second) log.Info("revoke rootcoord session") } } // Stop stops rootCoord. 
func (c *Core) Stop() error { c.UpdateStateCode(commonpb.StateCode_Abnormal) c.stopExecutor() c.stopScheduler() c.cancelIfNotNil() c.wg.Wait() c.revokeSession() return nil } // GetComponentStates get states of components func (c *Core) GetComponentStates(ctx context.Context) (*milvuspb.ComponentStates, error) { code := c.stateCode.Load().(commonpb.StateCode) nodeID := common.NotRegisteredID if c.session != nil && c.session.Registered() { nodeID = c.session.ServerID } return &milvuspb.ComponentStates{ State: &milvuspb.ComponentInfo{ // NodeID: c.session.ServerID, // will race with Core.Register() NodeID: nodeID, Role: typeutil.RootCoordRole, StateCode: code, ExtraInfo: nil, }, Status: &commonpb.Status{ ErrorCode: commonpb.ErrorCode_Success, Reason: "", }, SubcomponentStates: []*milvuspb.ComponentInfo{ { NodeID: nodeID, Role: typeutil.RootCoordRole, StateCode: code, ExtraInfo: nil, }, }, }, nil } // GetTimeTickChannel get timetick channel name func (c *Core) GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error) { return &milvuspb.StringResponse{ Status: &commonpb.Status{ ErrorCode: commonpb.ErrorCode_Success, Reason: "", }, Value: Params.CommonCfg.RootCoordTimeTick.GetValue(), }, nil } // GetStatisticsChannel get statistics channel name func (c *Core) GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error) { return &milvuspb.StringResponse{ Status: &commonpb.Status{ ErrorCode: commonpb.ErrorCode_Success, Reason: "", }, Value: Params.CommonCfg.RootCoordStatistics.GetValue(), }, nil } // CreateCollection create collection func (c *Core) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) { if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil } metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("CreateCollection") 
log.Ctx(ctx).Info("received request to create collection", zap.String("role", typeutil.RootCoordRole), zap.String("name", in.GetCollectionName())) t := &createCollectionTask{ baseTask: baseTask{ ctx: ctx, core: c, done: make(chan error, 1), }, Req: in, } if err := c.scheduler.AddTask(t); err != nil { log.Ctx(ctx).Error("failed to enqueue request to create collection", zap.String("role", typeutil.RootCoordRole), zap.Error(err), zap.String("name", in.GetCollectionName())) metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.FailLabel).Inc() return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } if err := t.WaitToFinish(); err != nil { log.Ctx(ctx).Error("failed to create collection", zap.String("role", typeutil.RootCoordRole), zap.Error(err), zap.String("name", in.GetCollectionName()), zap.Uint64("ts", t.GetTs())) metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.FailLabel).Inc() return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } metrics.RootCoordDDLReqCounter.WithLabelValues("CreateCollection", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("CreateCollection").Observe(float64(tr.ElapseSpan().Milliseconds())) metrics.RootCoordNumOfCollections.Inc() log.Ctx(ctx).Info("done to create collection", zap.String("role", typeutil.RootCoordRole), zap.String("name", in.GetCollectionName()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // DropCollection drop collection func (c *Core) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) { if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil } metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("DropCollection") log.Ctx(ctx).Info("received request to drop collection", zap.String("role", 
typeutil.RootCoordRole), zap.String("name", in.GetCollectionName())) t := &dropCollectionTask{ baseTask: baseTask{ ctx: ctx, core: c, done: make(chan error, 1), }, Req: in, } if err := c.scheduler.AddTask(t); err != nil { log.Ctx(ctx).Error("failed to enqueue request to drop collection", zap.String("role", typeutil.RootCoordRole), zap.Error(err), zap.String("name", in.GetCollectionName())) metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.FailLabel).Inc() return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } if err := t.WaitToFinish(); err != nil { log.Ctx(ctx).Error("failed to drop collection", zap.String("role", typeutil.RootCoordRole), zap.Error(err), zap.String("name", in.GetCollectionName()), zap.Uint64("ts", t.GetTs())) metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.FailLabel).Inc() return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } metrics.RootCoordDDLReqCounter.WithLabelValues("DropCollection", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("DropCollection").Observe(float64(tr.ElapseSpan().Milliseconds())) metrics.RootCoordNumOfCollections.Dec() log.Ctx(ctx).Info("done to drop collection", zap.String("role", typeutil.RootCoordRole), zap.String("name", in.GetCollectionName()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // HasCollection check collection existence func (c *Core) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) { if code, ok := c.checkHealthy(); !ok { return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), Value: false, }, nil } metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("HasCollection") ts := getTravelTs(in) log := log.Ctx(ctx).With(zap.String("collection name", in.GetCollectionName()), 
zap.Uint64("ts", ts)) log.Info("received request to has collection") t := &hasCollectionTask{ baseTask: newBaseTask(ctx, c), Req: in, Rsp: &milvuspb.BoolResponse{}, } if err := c.scheduler.AddTask(t); err != nil { log.Warn("failed to enqueue request to has collection", zap.Error(err)) metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.FailLabel).Inc() return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasCollection failed: "+err.Error()), Value: false, }, nil } if err := t.WaitToFinish(); err != nil { log.Warn("failed to has collection", zap.Error(err)) metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.FailLabel).Inc() return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasCollection failed: "+err.Error()), Value: false, }, nil } metrics.RootCoordDDLReqCounter.WithLabelValues("HasCollection", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("HasCollection").Observe(float64(tr.ElapseSpan().Milliseconds())) log.Info("done to has collection", zap.Bool("exist", t.Rsp.GetValue())) return t.Rsp, nil } func (c *Core) describeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*model.Collection, error) { ts := getTravelTs(in) if in.GetCollectionName() != "" { return c.meta.GetCollectionByName(ctx, in.GetCollectionName(), ts) } return c.meta.GetCollectionByID(ctx, in.GetCollectionID(), ts) } func convertModelToDesc(collInfo *model.Collection, aliases []string) *milvuspb.DescribeCollectionResponse { resp := &milvuspb.DescribeCollectionResponse{Status: succStatus()} resp.Schema = &schemapb.CollectionSchema{ Name: collInfo.Name, Description: collInfo.Description, AutoID: collInfo.AutoID, Fields: model.MarshalFieldModels(collInfo.Fields), } resp.CollectionID = collInfo.CollectionID resp.VirtualChannelNames = collInfo.VirtualChannelNames resp.PhysicalChannelNames = collInfo.PhysicalChannelNames if collInfo.ShardsNum == 0 
{
		collInfo.ShardsNum = int32(len(collInfo.VirtualChannelNames))
	}
	resp.ShardsNum = collInfo.ShardsNum
	resp.ConsistencyLevel = collInfo.ConsistencyLevel
	resp.CreatedTimestamp = collInfo.CreateTime
	createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime)
	resp.CreatedUtcTimestamp = uint64(createdPhysicalTime)
	resp.Aliases = aliases
	resp.StartPositions = collInfo.StartPositions
	resp.CollectionName = resp.Schema.Name
	resp.Properties = collInfo.Properties
	return resp
}

// DescribeCollection return collection info
func (c *Core) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.DescribeCollectionResponse{
			// Fix: message read "StateCode<name>" without the "=" separator
			// every other handler in this file uses.
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]),
		}, nil
	}
	metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("DescribeCollection")
	ts := getTravelTs(in)
	log := log.Ctx(ctx).With(zap.String("collection name", in.GetCollectionName()),
		zap.Int64("id", in.GetCollectionID()),
		zap.Uint64("ts", ts))
	// TODO(longjiquan): log may be very frequent here.
	log.Info("received request to describe collection")
	t := &describeCollectionTask{
		baseTask: newBaseTask(ctx, c),
		Req:      in,
		Rsp:      &milvuspb.DescribeCollectionResponse{},
	}
	if err := c.scheduler.AddTask(t); err != nil {
		log.Warn("failed to enqueue request to describe collection", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc()
		return &milvuspb.DescribeCollectionResponse{
			// TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()),
		}, nil
	}
	if err := t.WaitToFinish(); err != nil {
		log.Warn("failed to describe collection", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.FailLabel).Inc()
		return &milvuspb.DescribeCollectionResponse{
			// TODO: use commonpb.ErrorCode_CollectionNotExists. SDK use commonpb.ErrorCode_UnexpectedError now.
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()),
		}, nil
	}
	metrics.RootCoordDDLReqCounter.WithLabelValues("DescribeCollection", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("DescribeCollection").Observe(float64(tr.ElapseSpan().Milliseconds()))
	log.Info("done to describe collection", zap.Int64("collection_id", t.Rsp.GetCollectionID()))
	return t.Rsp, nil
}

// ShowCollections list all collection names
func (c *Core) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error) {
	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.ShowCollectionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]),
		}, nil
	}
	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("ShowCollections")
	ts := getTravelTs(in)
	log := log.Ctx(ctx).With(zap.String("dbname", in.GetDbName()),
		zap.Uint64("ts", ts))
	log.Info("received request to show collections")
	t := &showCollectionTask{
		baseTask: newBaseTask(ctx, c),
		Req:      in,
		Rsp:      &milvuspb.ShowCollectionsResponse{},
	}
	if err := c.scheduler.AddTask(t); err != nil {
		log.Warn("failed to enqueue request to show collections", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.FailLabel).Inc()
		return &milvuspb.ShowCollectionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowCollections failed: "+err.Error()),
		}, nil
	}
	if err := t.WaitToFinish(); err
!= nil {
		log.Warn("failed to show collections", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.FailLabel).Inc()
		return &milvuspb.ShowCollectionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowCollections failed: "+err.Error()),
		}, nil
	}
	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowCollections", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("ShowCollections").Observe(float64(tr.ElapseSpan().Milliseconds()))
	log.Info("done to show collections", zap.Int("num of collections", len(t.Rsp.GetCollectionNames()))) // maybe very large, print number instead.
	return t.Rsp, nil
}

// AlterCollection alters collection-level properties through the DDL
// scheduler and reports per-request metrics.
func (c *Core) AlterCollection(ctx context.Context, in *milvuspb.AlterCollectionRequest) (*commonpb.Status, error) {
	if code, ok := c.checkHealthy(); !ok {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil
	}
	metrics.RootCoordDDLReqCounter.WithLabelValues("AlterCollection", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("AlterCollection")
	log.Ctx(ctx).Info("received request to alter collection",
		zap.String("role", typeutil.RootCoordRole),
		zap.String("name", in.GetCollectionName()))
	t := &alterCollectionTask{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
	}
	if err := c.scheduler.AddTask(t); err != nil {
		// Consistency: sibling handlers log through log.Ctx(ctx); the bare
		// log.Warn here dropped the request context.
		log.Ctx(ctx).Warn("failed to enqueue request to alter collection",
			zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("name", in.GetCollectionName()))
		metrics.RootCoordDDLReqCounter.WithLabelValues("AlterCollection", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}
	if err := t.WaitToFinish(); err != nil {
		log.Ctx(ctx).Warn("failed to alter collection",
			zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("name", in.GetCollectionName()),
			zap.Uint64("ts", t.GetTs()))
		metrics.RootCoordDDLReqCounter.WithLabelValues("AlterCollection", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}
	metrics.RootCoordDDLReqCounter.WithLabelValues("AlterCollection", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("AlterCollection").Observe(float64(tr.ElapseSpan().Milliseconds()))
	// Fix: a copy/paste from DropCollection decremented
	// RootCoordNumOfCollections here on every successful alter; altering a
	// collection does not change the collection count.
	log.Ctx(ctx).Info("done to alter collection",
		zap.String("role", typeutil.RootCoordRole),
		zap.String("name", in.GetCollectionName()),
		zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil
}

// CreatePartition create partition
func (c *Core) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
	if code, ok := c.checkHealthy(); !ok {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil
	}
	metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("CreatePartition")
	log.Ctx(ctx).Info("received request to create partition",
		zap.String("role", typeutil.RootCoordRole),
		zap.String("collection", in.GetCollectionName()),
		zap.String("partition", in.GetPartitionName()))
	t := &createPartitionTask{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
	}
	if err := c.scheduler.AddTask(t); err != nil {
		log.Ctx(ctx).Error("failed to enqueue request to create partition",
			zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()),
			zap.String("partition", in.GetPartitionName()))
		metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}
	if err := t.WaitToFinish(); err != nil {
		log.Ctx(ctx).Error("failed to create partition",
			zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("collection", in.GetCollectionName()),
			zap.String("partition", in.GetPartitionName()),
			zap.Uint64("ts", t.GetTs()))
metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.FailLabel).Inc() return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } metrics.RootCoordDDLReqCounter.WithLabelValues("CreatePartition", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("CreatePartition").Observe(float64(tr.ElapseSpan().Milliseconds())) log.Ctx(ctx).Info("done to create partition", zap.String("role", typeutil.RootCoordRole), zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // DropPartition drop partition func (c *Core) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) { if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil } metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("DropPartition") log.Ctx(ctx).Info("received request to drop partition", zap.String("role", typeutil.RootCoordRole), zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName())) t := &dropPartitionTask{ baseTask: baseTask{ ctx: ctx, core: c, done: make(chan error, 1), }, Req: in, } if err := c.scheduler.AddTask(t); err != nil { log.Ctx(ctx).Error("failed to enqueue request to drop partition", zap.String("role", typeutil.RootCoordRole), zap.Error(err), zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName())) metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.FailLabel).Inc() return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } if err := t.WaitToFinish(); err != nil { log.Ctx(ctx).Error("failed to drop partition", zap.String("role", typeutil.RootCoordRole), zap.Error(err), zap.String("collection", in.GetCollectionName()), 
zap.String("partition", in.GetPartitionName()), zap.Uint64("ts", t.GetTs())) metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.FailLabel).Inc() return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } metrics.RootCoordDDLReqCounter.WithLabelValues("DropPartition", metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues("DropPartition").Observe(float64(tr.ElapseSpan().Milliseconds())) log.Ctx(ctx).Info("done to drop partition", zap.String("role", typeutil.RootCoordRole), zap.String("collection", in.GetCollectionName()), zap.String("partition", in.GetPartitionName()), zap.Uint64("ts", t.GetTs())) return succStatus(), nil } // HasPartition check partition existence func (c *Core) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) { if code, ok := c.checkHealthy(); !ok { return &milvuspb.BoolResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), Value: false, }, nil } metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder("HasPartition") // TODO(longjiquan): why HasPartitionRequest doesn't contain Timestamp but other requests do. 
	// No timestamp on the request, so read at the latest snapshot.
	ts := typeutil.MaxTimestamp
	log := log.Ctx(ctx).With(zap.String("collection", in.GetCollectionName()),
		zap.String("partition", in.GetPartitionName()),
		zap.Uint64("ts", ts))

	log.Info("received request to has partition")

	t := &hasPartitionTask{
		baseTask: newBaseTask(ctx, c),
		Req:      in,
		Rsp:      &milvuspb.BoolResponse{},
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Warn("failed to enqueue request to has partition", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.FailLabel).Inc()
		return &milvuspb.BoolResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasPartition failed: "+err.Error()),
			Value:  false,
		}, nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Warn("failed to has partition", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.FailLabel).Inc()
		return &milvuspb.BoolResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "HasPartition failed: "+err.Error()),
			Value:  false,
		}, nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("HasPartition", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("HasPartition").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to has partition", zap.Bool("exist", t.Rsp.GetValue()))
	return t.Rsp, nil
}

// ShowPartitions list all partition names.
// Scheduler-driven read: the task fills Rsp with the partition names of the
// requested collection.
func (c *Core) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error) {
	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.ShowPartitionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]),
		}, nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("ShowPartitions")

	log := log.Ctx(ctx).With(zap.String("collection", in.GetCollectionName()))

	log.Info("received request to show partitions")

	t := &showPartitionTask{
		baseTask: newBaseTask(ctx, c),
		Req:      in,
		Rsp:      &milvuspb.ShowPartitionsResponse{},
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Warn("failed to enqueue request to show partitions", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.FailLabel).Inc()
		return &milvuspb.ShowPartitionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowPartitions failed: "+err.Error()),
		}, nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Warn("failed to show partitions", zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.FailLabel).Inc()
		return &milvuspb.ShowPartitionsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "ShowPartitions failed: "+err.Error()),
		}, nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("ShowPartitions", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("ShowPartitions").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to show partitions", zap.Strings("partitions", t.Rsp.GetPartitionNames()))
	return t.Rsp, nil
}

// ShowSegments list all segments
func (c *Core) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error) {
	// ShowSegments Only used in GetPersistentSegmentInfo, it's already deprecated for a long time.
	// Though we continue to keep current logic, it's not right enough since RootCoord only contains indexed segments.
return &milvuspb.ShowSegmentsResponse{Status: succStatus()}, nil } // AllocTimestamp alloc timestamp func (c *Core) AllocTimestamp(ctx context.Context, in *rootcoordpb.AllocTimestampRequest) (*rootcoordpb.AllocTimestampResponse, error) { if code, ok := c.checkHealthy(); !ok { return &rootcoordpb.AllocTimestampResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), }, nil } ts, err := c.tsoAllocator.GenerateTSO(in.GetCount()) if err != nil { log.Ctx(ctx).Error("failed to allocate timestamp", zap.String("role", typeutil.RootCoordRole), zap.Error(err)) return &rootcoordpb.AllocTimestampResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "AllocTimestamp failed: "+err.Error()), }, nil } // return first available timestamp ts = ts - uint64(in.GetCount()) + 1 metrics.RootCoordTimestamp.Set(float64(ts)) return &rootcoordpb.AllocTimestampResponse{ Status: succStatus(), Timestamp: ts, Count: in.GetCount(), }, nil } // AllocID alloc ids func (c *Core) AllocID(ctx context.Context, in *rootcoordpb.AllocIDRequest) (*rootcoordpb.AllocIDResponse, error) { if code, ok := c.checkHealthy(); !ok { return &rootcoordpb.AllocIDResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), }, nil } start, _, err := c.idAllocator.Alloc(in.Count) if err != nil { log.Ctx(ctx).Error("failed to allocate id", zap.String("role", typeutil.RootCoordRole), zap.Error(err)) return &rootcoordpb.AllocIDResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "AllocID failed: "+err.Error()), Count: in.Count, }, nil } metrics.RootCoordIDAllocCounter.Add(float64(in.Count)) return &rootcoordpb.AllocIDResponse{ Status: succStatus(), ID: start, Count: in.Count, }, nil } // UpdateChannelTimeTick used to handle ChannelTimeTickMsg func (c *Core) UpdateChannelTimeTick(ctx context.Context, in *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error) { log := log.Ctx(ctx) if code, 
ok := c.checkHealthy(); !ok { log.Warn("failed to updateTimeTick because rootcoord is not healthy", zap.Any("state", code)) return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil } if in.Base.MsgType != commonpb.MsgType_TimeTick { log.Warn("failed to updateTimeTick because base messasge is not timetick, state", zap.Any("base message type", in.Base.MsgType)) msgTypeName := commonpb.MsgType_name[int32(in.Base.GetMsgType())] return failStatus(commonpb.ErrorCode_UnexpectedError, "invalid message type "+msgTypeName), nil } err := c.chanTimeTick.updateTimeTick(in, "gRPC") if err != nil { log.Warn("failed to updateTimeTick", zap.String("role", typeutil.RootCoordRole), zap.Error(err)) return failStatus(commonpb.ErrorCode_UnexpectedError, "UpdateTimeTick failed: "+err.Error()), nil } return succStatus(), nil } // InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies. func (c *Core) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) { if code, ok := c.checkHealthy(); !ok { return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil } err := c.proxyClientManager.InvalidateCollectionMetaCache(ctx, in) if err != nil { return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil } return succStatus(), nil } // ShowConfigurations returns the configurations of RootCoord matching req.Pattern func (c *Core) ShowConfigurations(ctx context.Context, req *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error) { if code, ok := c.checkHealthy(); !ok { return &internalpb.ShowConfigurationsResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), Configuations: nil, }, nil } return getComponentConfigurations(ctx, req), nil } // GetMetrics get metrics func (c *Core) GetMetrics(ctx 
context.Context, in *milvuspb.GetMetricsRequest) (*milvuspb.GetMetricsResponse, error) {
	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.GetMetricsResponse{
			Status:   failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]),
			Response: "",
		}, nil
	}

	metricType, err := metricsinfo.ParseMetricType(in.Request)
	if err != nil {
		log.Warn("ParseMetricType failed", zap.String("role", typeutil.RootCoordRole),
			zap.Int64("node_id", c.session.ServerID), zap.String("req", in.Request), zap.Error(err))
		return &milvuspb.GetMetricsResponse{
			Status:   failStatus(commonpb.ErrorCode_UnexpectedError, "ParseMetricType failed: "+err.Error()),
			Response: "",
		}, nil
	}

	log.Ctx(ctx).Debug("GetMetrics success", zap.String("role", typeutil.RootCoordRole),
		zap.String("metric_type", metricType))

	if metricType == metricsinfo.SystemInfoMetrics {
		// Serve from the metrics cache when possible; fall back to collecting
		// fresh system info and refresh the cache with the result.
		ret, err := c.metricsCacheManager.GetSystemInfoMetrics()
		if err == nil && ret != nil {
			return ret, nil
		}

		log.Warn("GetSystemInfoMetrics from cache failed", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err))

		systemInfoMetrics, err := c.getSystemInfoMetrics(ctx, in)
		if err != nil {
			log.Warn("GetSystemInfoMetrics failed", zap.String("role", typeutil.RootCoordRole),
				zap.String("metric_type", metricType), zap.Error(err))
			return &milvuspb.GetMetricsResponse{
				Status:   failStatus(commonpb.ErrorCode_UnexpectedError, fmt.Sprintf("getSystemInfoMetrics failed: %s", err.Error())),
				Response: "",
			}, nil
		}

		c.metricsCacheManager.UpdateSystemInfoMetrics(systemInfoMetrics)
		return systemInfoMetrics, err
	}

	log.Warn("GetMetrics failed, metric type not implemented", zap.String("role", typeutil.RootCoordRole),
		zap.String("metric_type", metricType))

	return &milvuspb.GetMetricsResponse{
		Status:   failStatus(commonpb.ErrorCode_UnexpectedError, metricsinfo.MsgUnimplementedMetric),
		Response: "",
	}, nil
}

// CreateAlias create collection alias.
// Scheduler-driven DDL: enqueue, wait, and record total/fail/success metrics.
func (c *Core) CreateAlias(ctx context.Context, in *milvuspb.CreateAliasRequest) (*commonpb.Status, error) {
	if code, ok := c.checkHealthy(); !ok {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("CreateAlias")

	log.Ctx(ctx).Info("received request to create alias", zap.String("role", typeutil.RootCoordRole),
		zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()))

	t := &createAliasTask{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Warn("failed to enqueue request to create alias", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Warn("failed to create alias", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()),
			zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("CreateAlias", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("CreateAlias").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to create alias", zap.String("role", typeutil.RootCoordRole),
		zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()),
		zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil
}

// DropAlias drop collection alias
func (c *Core) DropAlias(ctx context.Context, in *milvuspb.DropAliasRequest) (*commonpb.Status, error) {
	if code, ok :=
c.checkHealthy(); !ok {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("DropAlias")

	log.Ctx(ctx).Info("received request to drop alias", zap.String("role", typeutil.RootCoordRole),
		zap.String("alias", in.GetAlias()))

	t := &dropAliasTask{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Warn("failed to enqueue request to drop alias", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("alias", in.GetAlias()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Warn("failed to drop alias", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("alias", in.GetAlias()),
			zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("DropAlias", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("DropAlias").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to drop alias", zap.String("role", typeutil.RootCoordRole),
		zap.String("alias", in.GetAlias()),
		zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil
}

// AlterAlias alter collection alias.
// Scheduler-driven DDL: enqueue, wait, and record total/fail/success metrics.
func (c *Core) AlterAlias(ctx context.Context, in *milvuspb.AlterAliasRequest) (*commonpb.Status, error) {
	if code, ok := c.checkHealthy(); !ok {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil
	}

	// BUGFIX: the total counter was previously incremented under the
	// copy-pasted "DropAlias" label, skewing both methods' metrics.
	metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder("AlterAlias")

	log.Ctx(ctx).Info("received request to alter alias", zap.String("role", typeutil.RootCoordRole),
		zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()))

	t := &alterAliasTask{
		baseTask: baseTask{
			ctx:  ctx,
			core: c,
			done: make(chan error, 1),
		},
		Req: in,
	}

	if err := c.scheduler.AddTask(t); err != nil {
		log.Warn("failed to enqueue request to alter alias", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}

	if err := t.WaitToFinish(); err != nil {
		log.Warn("failed to alter alias", zap.String("role", typeutil.RootCoordRole),
			zap.Error(err),
			zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()),
			zap.Uint64("ts", t.GetTs()))

		metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()), nil
	}

	metrics.RootCoordDDLReqCounter.WithLabelValues("AlterAlias", metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues("AlterAlias").Observe(float64(tr.ElapseSpan().Milliseconds()))

	log.Info("done to alter alias", zap.String("role", typeutil.RootCoordRole),
		zap.String("alias", in.GetAlias()), zap.String("collection", in.GetCollectionName()),
		zap.Uint64("ts", t.GetTs()))
	return succStatus(), nil
}

// Import imports large files (json, numpy, etc.) on MinIO/S3 storage into Milvus storage.
func (c *Core) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvuspb.ImportResponse, error) { if code, ok := c.checkHealthy(); !ok { return &milvuspb.ImportResponse{ Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), }, nil } // Get collection/partition ID from collection/partition name. var colInfo *model.Collection var err error if colInfo, err = c.meta.GetCollectionByName(ctx, req.GetCollectionName(), typeutil.MaxTimestamp); err != nil { log.Error("failed to find collection ID from its name", zap.String("collection name", req.GetCollectionName()), zap.Error(err)) return nil, err } cID := colInfo.CollectionID req.ChannelNames = c.meta.GetCollectionVirtualChannels(cID) if req.GetPartitionName() == "" { req.PartitionName = Params.CommonCfg.DefaultPartitionName.GetValue() } var pID UniqueID if pID, err = c.meta.GetPartitionByName(cID, req.GetPartitionName(), typeutil.MaxTimestamp); err != nil { log.Error("failed to get partition ID from its name", zap.String("partition name", req.GetPartitionName()), zap.Error(err)) return nil, err } log.Info("RootCoord receive import request", zap.String("collection name", req.GetCollectionName()), zap.Int64("collection ID", cID), zap.String("partition name", req.GetPartitionName()), zap.Strings("virtual channel names", req.GetChannelNames()), zap.Int64("partition ID", pID), zap.Int("# of files = ", len(req.GetFiles())), ) importJobResp := c.importManager.importJob(ctx, req, cID, pID) return importJobResp, nil } // GetImportState returns the current state of an import task. 
// GetImportState returns the current state of an import task.
func (c *Core) GetImportState(ctx context.Context, req *milvuspb.GetImportStateRequest) (*milvuspb.GetImportStateResponse, error) {
	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.GetImportStateResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]),
		}, nil
	}
	// Delegate directly to the import manager's task-state lookup.
	return c.importManager.getTaskState(req.GetTask()), nil
}

// ListImportTasks returns id array of all import tasks.
// When a collection name is given, only that collection's tasks are listed;
// otherwise tasks of all collections are returned.
func (c *Core) ListImportTasks(ctx context.Context, req *milvuspb.ListImportTasksRequest) (*milvuspb.ListImportTasksResponse, error) {
	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.ListImportTasksResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]),
		}, nil
	}

	// colID == -1 is the sentinel for "all collections".
	colID := int64(-1)
	collectionName := req.GetCollectionName()
	if len(collectionName) != 0 {
		// if the collection name is specified but not found, user may input a wrong name, the collection doesn't exist or has been dropped.
		// we will return error to notify user the name is incorrect.
		colInfo, err := c.meta.GetCollectionByName(ctx, req.GetCollectionName(), typeutil.MaxTimestamp)
		if err != nil {
			err = fmt.Errorf("failed to find collection ID from its name: '%s', error: %w", req.GetCollectionName(), err)
			log.Error("ListImportTasks failed", zap.Error(err))
			return &milvuspb.ListImportTasksResponse{
				Status: failStatus(commonpb.ErrorCode_IllegalCollectionName, err.Error()),
			}, nil
		}
		colID = colInfo.CollectionID
	}

	// if the collection name is not specified, the colID is -1, listAllTasks will return all tasks
	tasks, err := c.importManager.listAllTasks(colID, req.GetLimit())
	if err != nil {
		err = fmt.Errorf("failed to list import tasks, collection name: '%s', error: %w", req.GetCollectionName(), err)
		log.Error("ListImportTasks failed", zap.Error(err))
		return &milvuspb.ListImportTasksResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, err.Error()),
		}, nil
	}

	resp := &milvuspb.ListImportTasksResponse{
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		},
		Tasks: tasks,
	}
	return resp, nil
}

// ReportImport reports import task state to RootCoord.
// DataNodes call this as an import task transitions; on terminal states the
// reporting node is marked idle again and pending tasks are re-dispatched.
func (c *Core) ReportImport(ctx context.Context, ir *rootcoordpb.ImportResult) (*commonpb.Status, error) {
	log.Info("RootCoord receive import state report",
		zap.Int64("task ID", ir.GetTaskId()),
		zap.Any("import state", ir.GetState()))

	if code, ok := c.checkHealthy(); !ok {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "StateCode="+commonpb.StateCode_name[int32(code)]), nil
	}

	// If setting ImportState_ImportCompleted, simply update the state and return directly.
	if ir.GetState() == commonpb.ImportState_ImportCompleted {
		if err := c.importManager.setImportTaskState(ir.GetTaskId(), commonpb.ImportState_ImportCompleted); err != nil {
			errMsg := "failed to set import task as ImportState_ImportCompleted"
			log.Error(errMsg, zap.Error(err))
			return &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
				Reason:    fmt.Sprintf("%s %s", errMsg, err.Error()),
			}, nil
		}
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}

	// Upon receiving ReportImport request, update the related task's state in task store.
	ti, err := c.importManager.updateTaskInfo(ir)
	if err != nil {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UpdateImportTaskFailure,
			Reason:    err.Error(),
		}, nil
	}

	// This method update a busy node to idle node, and send import task to idle node.
	// The inner func scopes the busy-node lock to just the delete.
	resendTaskFunc := func() {
		func() {
			c.importManager.busyNodesLock.Lock()
			defer c.importManager.busyNodesLock.Unlock()
			delete(c.importManager.busyNodes, ir.GetDatanodeId())
			log.Info("a DataNode is no longer busy after processing task",
				zap.Int64("dataNode ID", ir.GetDatanodeId()),
				zap.Int64("task ID", ir.GetTaskId()))
		}()
		err := c.importManager.sendOutTasks(c.importManager.ctx)
		if err != nil {
			log.Error("fail to send out import task to datanodes")
		}
	}

	// If task failed, send task to idle datanode
	if ir.GetState() == commonpb.ImportState_ImportFailed {
		// When a DataNode failed importing, remove this DataNode from the busy node list and send out import tasks again.
		log.Info("an import task has failed, marking DataNode available and resending import task",
			zap.Int64("task ID", ir.GetTaskId()))
		resendTaskFunc()
	} else if ir.GetState() != commonpb.ImportState_ImportPersisted {
		log.Debug("unexpected import task state reported, return immediately (this should not happen)",
			zap.Any("task ID", ir.GetTaskId()),
			zap.Any("import state", ir.GetState()))
		resendTaskFunc()
	} else {
		// Here ir.GetState() == commonpb.ImportState_ImportPersisted
		// When a DataNode finishes importing, remove this DataNode from the busy node list and send out import tasks again.
		resendTaskFunc()
		// Flush all import data segments.
		if err := c.broker.Flush(ctx, ti.GetCollectionId(), ir.GetSegments()); err != nil {
			log.Error("failed to call Flush on bulk insert segments",
				zap.Int64("task ID", ir.GetTaskId()))
			return &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UnexpectedError,
				Reason:    err.Error(),
			}, nil
		}
	}

	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
	}, nil
}

// ExpireCredCache will call invalidate credential cache
func (c *Core) ExpireCredCache(ctx context.Context, username string) error {
	req := proxypb.InvalidateCredCacheRequest{
		Base: commonpbutil.NewMsgBase(
			commonpbutil.WithMsgType(0), //TODO, msg type
			commonpbutil.WithMsgID(0),   //TODO, msg id
			commonpbutil.WithSourceID(c.session.ServerID),
		),
		Username: username,
	}
	return c.proxyClientManager.InvalidateCredentialCache(ctx, &req)
}

// UpdateCredCache will call update credential cache
func (c *Core) UpdateCredCache(ctx context.Context, credInfo *internalpb.CredentialInfo) error {
	req := proxypb.UpdateCredCacheRequest{
		Base: commonpbutil.NewMsgBase(
			commonpbutil.WithMsgType(0), //TODO, msg type
			commonpbutil.WithMsgID(0),   //TODO, msg id
			commonpbutil.WithSourceID(c.session.ServerID),
		),
		Username: credInfo.Username,
		Password: credInfo.Sha256Password,
	}
	return c.proxyClientManager.UpdateCredentialCache(ctx, &req)
}

// CreateCredential create new user and password
//  1. decode ciphertext password to raw password
//  2. encrypt raw password
//  3. save in to etcd
func (c *Core) CreateCredential(ctx context.Context, credInfo *internalpb.CredentialInfo) (*commonpb.Status, error) {
	method := "CreateCredential"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	log.Debug("CreateCredential", zap.String("role", typeutil.RootCoordRole),
		zap.String("username", credInfo.Username))

	// insert to db
	err := c.meta.AddCredential(credInfo)
	if err != nil {
		log.Error("CreateCredential save credential failed", zap.String("role", typeutil.RootCoordRole),
			zap.String("username", credInfo.Username), zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_CreateCredentialFailure, "CreateCredential failed: "+err.Error()), nil
	}

	// update proxy's local cache
	// NOTE: a cache-update failure is only logged; the credential itself was
	// persisted, so the call still succeeds.
	err = c.UpdateCredCache(ctx, credInfo)
	if err != nil {
		log.Warn("CreateCredential add cache failed", zap.String("role", typeutil.RootCoordRole),
			zap.String("username", credInfo.Username), zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
	}

	log.Debug("CreateCredential success", zap.String("role", typeutil.RootCoordRole),
		zap.String("username", credInfo.Username))

	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.RootCoordNumOfCredentials.Inc()
	return succStatus(), nil
}

// GetCredential get credential by username
func (c *Core) GetCredential(ctx context.Context, in *rootcoordpb.GetCredentialRequest) (*rootcoordpb.GetCredentialResponse, error) {
	method := "GetCredential"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	log.Debug("GetCredential", zap.String("role", typeutil.RootCoordRole),
		zap.String("username", in.Username))

	credInfo, err := c.meta.GetCredential(in.Username)
	if err != nil {
		log.Error("GetCredential query credential failed", zap.String("role", typeutil.RootCoordRole),
			zap.String("username", in.Username), zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
		// NOTE(review): this path returns both a failure status and a non-nil
		// err, unlike most other handlers here which return nil — verify callers.
		return &rootcoordpb.GetCredentialResponse{
			Status: failStatus(commonpb.ErrorCode_GetCredentialFailure, "GetCredential failed: "+err.Error()),
		}, err
	}

	log.Debug("GetCredential success", zap.String("role", typeutil.RootCoordRole),
		zap.String("username", in.Username))

	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return &rootcoordpb.GetCredentialResponse{
		Status:   succStatus(),
		Username: credInfo.Username,
		Password: credInfo.EncryptedPassword,
	}, nil
}

// UpdateCredential update password for a user
func (c *Core) UpdateCredential(ctx context.Context, credInfo *internalpb.CredentialInfo) (*commonpb.Status, error) {
	method := "UpdateCredential"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	log.Debug("UpdateCredential", zap.String("role", typeutil.RootCoordRole),
		zap.String("username", credInfo.Username))

	// update data on storage
	err := c.meta.AlterCredential(credInfo)
	if err != nil {
		log.Error("UpdateCredential save credential failed", zap.String("role", typeutil.RootCoordRole),
			zap.String("username", credInfo.Username), zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UpdateCredentialFailure, "UpdateCredential failed: "+err.Error()), nil
	}

	// update proxy's local cache; unlike CreateCredential, a cache failure here
	// fails the whole call (stale passwords must not be served).
	err = c.UpdateCredCache(ctx, credInfo)
	if err != nil {
		log.Error("UpdateCredential update cache failed", zap.String("role", typeutil.RootCoordRole),
			zap.String("username", credInfo.Username), zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_UpdateCredentialFailure, "UpdateCredential failed: "+err.Error()), nil
	}

	log.Debug("UpdateCredential success", zap.String("role", typeutil.RootCoordRole),
		zap.String("username", credInfo.Username))

	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return succStatus(), nil
}

// DeleteCredential delete a user
func (c *Core) DeleteCredential(ctx context.Context, in *milvuspb.DeleteCredentialRequest) (*commonpb.Status, error) {
	method := "DeleteCredential"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)

	// delete data on storage
	err := c.meta.DeleteCredential(in.Username)
	if err != nil {
		log.Error("DeleteCredential remove credential failed", zap.String("role", typeutil.RootCoordRole),
			zap.String("username", in.Username), zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
		// NOTE(review): returns non-nil err here but nil below — inconsistent.
		return failStatus(commonpb.ErrorCode_DeleteCredentialFailure, "DeleteCredential failed: "+err.Error()), err
	}

	// invalidate proxy's local cache
	err = c.ExpireCredCache(ctx, in.Username)
	if err != nil {
		log.Error("DeleteCredential expire credential cache failed", zap.String("role", typeutil.RootCoordRole),
			zap.String("username", in.Username), zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
		return failStatus(commonpb.ErrorCode_DeleteCredentialFailure, "DeleteCredential failed: "+err.Error()), nil
	}

	log.Debug("DeleteCredential success", zap.String("role", typeutil.RootCoordRole),
		zap.String("username", in.Username))

	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.RootCoordNumOfCredentials.Dec()
	return succStatus(), nil
}

// ListCredUsers list all usernames
func (c *Core) ListCredUsers(ctx context.Context, in *milvuspb.ListCredUsersRequest) (*milvuspb.ListCredUsersResponse, error) {
	method := "ListCredUsers"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)

	credInfo, err := c.meta.ListCredentialUsernames()
	if err != nil {
		log.Ctx(ctx).Error("ListCredUsers query usernames failed",
			zap.String("role", typeutil.RootCoordRole),
			zap.Error(err))
		metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.FailLabel).Inc()
		return &milvuspb.ListCredUsersResponse{
			Status: failStatus(commonpb.ErrorCode_ListCredUsersFailure, "ListCredUsers failed: "+err.Error()),
		}, err
	}

	log.Debug("ListCredUsers success", zap.String("role", typeutil.RootCoordRole))

	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return &milvuspb.ListCredUsersResponse{
		Status:    succStatus(),
		Usernames: credInfo.Usernames,
	}, nil
}

// CreateRole create role
// - check the node health
// - check if the role is existed
// - check if the role num has reached the limit
// - create the role by the meta api
func (c *Core) CreateRole(ctx context.Context, in *milvuspb.CreateRoleRequest) (*commonpb.Status, error) {
	method := "CreateRole"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	logger.Debug(method, zap.Any("in", in))

	if code, ok := c.checkHealthy(); !ok {
		return errorutil.UnhealthyStatus(code), errorutil.UnhealthyError()
	}
	entity := in.Entity

	err := c.meta.CreateRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: entity.Name})
	if err != nil {
		errMsg := "fail to create role"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_CreateRoleFailure, errMsg), nil
	}

	logger.Debug(method+" success", zap.String("role_name", entity.Name))
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.RootCoordNumOfRoles.Inc()
	return succStatus(), nil
}

// DropRole drop role
// - check the node health
// - check if the role name is existed
// - check if the role has some grant info
// - get all role mapping of this role
// - drop these role mappings
// - drop the role by the meta api
func (c *Core) DropRole(ctx context.Context, in *milvuspb.DropRoleRequest) (*commonpb.Status, error) {
	method := "DropRole"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	logger.Debug(method, zap.Any("in", in))

	if code, ok := c.checkHealthy(); !ok {
		return errorutil.UnhealthyStatus(code), errorutil.UnhealthyError()
	}
	// The role must exist before it can be dropped.
	if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil {
		errMsg := "the role isn't existed"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil
	}

	// NOTE(review): the error from SelectGrant is not checked before the
	// length test below — a lookup failure is indistinguishable from
	// "no grants"; confirm this is intended.
	grantEntities, err := c.meta.SelectGrant(util.DefaultTenant, &milvuspb.GrantEntity{
		Role: &milvuspb.RoleEntity{Name: in.RoleName},
	})
	if len(grantEntities) != 0 {
		errMsg := "fail to drop the role that it has privileges. Use REVOKE API to revoke privileges"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil
	}

	// Fetch the role with its user mappings so each mapping can be removed.
	roleResults, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, true)
	if err != nil {
		errMsg := "fail to select a role by role name"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil
	}

	logger.Debug("role to user info", zap.Int("counter", len(roleResults)))
	for _, roleResult := range roleResults {
		for index, userEntity := range roleResult.Users {
			if err = c.meta.OperateUserRole(util.DefaultTenant,
				&milvuspb.UserEntity{Name: userEntity.Name},
				&milvuspb.RoleEntity{Name: roleResult.Role.Name},
				milvuspb.OperateUserRoleType_RemoveUserFromRole); err != nil {
				// Ignorable errors (e.g. mapping already gone) don't abort the drop.
				if common.IsIgnorableError(err) {
					continue
				}
				errMsg := "fail to remove user from role"
				log.Error(errMsg, zap.Any("in", in),
					zap.String("role_name", roleResult.Role.Name),
					zap.String("username", userEntity.Name),
					zap.Int("current_index", index),
					zap.Error(err))
				return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil
			}
		}
	}

	if err = c.meta.DropGrant(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}); err != nil {
		errMsg := "fail to drop the grant"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil
	}

	if err = c.meta.DropRole(util.DefaultTenant, in.RoleName); err != nil {
		errMsg := "fail to drop the role"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_DropRoleFailure, errMsg), nil
	}

	logger.Debug(method+" success", zap.String("role_name", in.RoleName))
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	metrics.RootCoordNumOfRoles.Dec()
	return succStatus(), nil
}

// OperateUserRole operate the
// relationship between a user and a role
// - check the node health
// - check if the role is valid
// - check if the user is valid
// - operate the user-role by the meta api
// - update the policy cache
func (c *Core) OperateUserRole(ctx context.Context, in *milvuspb.OperateUserRoleRequest) (*commonpb.Status, error) {
	method := "OperateUserRole-" + in.Type.String()
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	logger.Debug(method, zap.Any("in", in))

	if code, ok := c.checkHealthy(); !ok {
		return errorutil.UnhealthyStatus(code), errorutil.UnhealthyError()
	}
	// Both ends of the mapping must already exist.
	if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.RoleName}, false); err != nil {
		errMsg := "not found the role: " + in.RoleName
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil
	}
	if _, err := c.meta.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: in.Username}, false); err != nil {
		errMsg := "not found the user: " + in.Username
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil
	}
	// An ignorable error (mapping already in the desired state) still succeeds,
	// but then the proxy policy caches are left untouched.
	updateCache := true
	if err := c.meta.OperateUserRole(util.DefaultTenant, &milvuspb.UserEntity{Name: in.Username}, &milvuspb.RoleEntity{Name: in.RoleName}, in.Type); err != nil {
		if !common.IsIgnorableError(err) {
			errMsg := "fail to operate user to role"
			log.Error(errMsg, zap.Any("in", in), zap.Error(err))
			return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil
		}
		updateCache = false
	}

	if updateCache {
		// Map the RPC operate type onto the proxy-side cache operation.
		var opType int32
		switch in.Type {
		case milvuspb.OperateUserRoleType_AddUserToRole:
			opType = int32(typeutil.CacheAddUserToRole)
		case milvuspb.OperateUserRoleType_RemoveUserFromRole:
			opType = int32(typeutil.CacheRemoveUserFromRole)
		default:
			errMsg := "invalid operate type for the OperateUserRole api"
			log.Error(errMsg, zap.Any("in", in))
			return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil
		}
		if err := c.proxyClientManager.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{
			OpType: opType,
			OpKey:  funcutil.EncodeUserRoleCache(in.Username, in.RoleName),
		}); err != nil {
			errMsg := "fail to refresh policy info cache"
			log.Error(errMsg, zap.Any("in", in), zap.Error(err))
			return failStatus(commonpb.ErrorCode_OperateUserRoleFailure, errMsg), nil
		}
	}

	logger.Debug(method + " success")
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return succStatus(), nil
}

// SelectRole select role
// - check the node health
// - check if the role is valid when this param is provided
// - select role by the meta api
func (c *Core) SelectRole(ctx context.Context, in *milvuspb.SelectRoleRequest) (*milvuspb.SelectRoleResponse, error) {
	method := "SelectRole"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	logger.Debug(method, zap.Any("in", in))

	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.SelectRoleResponse{Status: errorutil.UnhealthyStatus(code)}, errorutil.UnhealthyError()
	}
	if in.Role != nil {
		if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: in.Role.Name}, false); err != nil {
			// A role that does not exist yields an empty successful result,
			// not an error.
			if common.IsKeyNotExistError(err) {
				return &milvuspb.SelectRoleResponse{
					Status: succStatus(),
				}, nil
			}
			errMsg := "fail to select the role to check the role name"
			log.Error(errMsg, zap.Any("in", in), zap.Error(err))
			return &milvuspb.SelectRoleResponse{
				Status: failStatus(commonpb.ErrorCode_SelectRoleFailure, errMsg),
			}, nil
		}
	}
	roleResults, err := c.meta.SelectRole(util.DefaultTenant, in.Role, in.IncludeUserInfo)
	if err != nil {
		errMsg := "fail to select the role"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return &milvuspb.SelectRoleResponse{
			Status:
failStatus(commonpb.ErrorCode_SelectRoleFailure, errMsg),
		}, nil
	}

	logger.Debug(method + " success")
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return &milvuspb.SelectRoleResponse{
		Status:  succStatus(),
		Results: roleResults,
	}, nil
}

// SelectUser select user
// - check the node health
// - check if the user is valid when this param is provided
// - select user by the meta api
func (c *Core) SelectUser(ctx context.Context, in *milvuspb.SelectUserRequest) (*milvuspb.SelectUserResponse, error) {
	method := "SelectUser"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	logger.Debug(method, zap.Any("in", in))

	if code, ok := c.checkHealthy(); !ok {
		return &milvuspb.SelectUserResponse{Status: errorutil.UnhealthyStatus(code)}, errorutil.UnhealthyError()
	}
	if in.User != nil {
		if _, err := c.meta.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: in.User.Name}, false); err != nil {
			// A user that does not exist yields an empty successful result,
			// not an error.
			if common.IsKeyNotExistError(err) {
				return &milvuspb.SelectUserResponse{
					Status: succStatus(),
				}, nil
			}
			errMsg := "fail to select the user to check the username"
			log.Error(errMsg, zap.Any("in", in), zap.Error(err))
			return &milvuspb.SelectUserResponse{
				Status: failStatus(commonpb.ErrorCode_SelectUserFailure, errMsg),
			}, nil
		}
	}
	userResults, err := c.meta.SelectUser(util.DefaultTenant, in.User, in.IncludeRoleInfo)
	if err != nil {
		errMsg := "fail to select the user"
		log.Error(errMsg, zap.Any("in", in), zap.Error(err))
		return &milvuspb.SelectUserResponse{
			Status: failStatus(commonpb.ErrorCode_SelectUserFailure, errMsg),
		}, nil
	}

	logger.Debug(method + " success")
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return &milvuspb.SelectUserResponse{
		Status:  succStatus(),
		Results: userResults,
	}, nil
}

// isValidRole checks that the role entity is non-nil, carries a non-empty
// name, and refers to a role that exists in the metastore.
func (c *Core) isValidRole(entity *milvuspb.RoleEntity) error {
	if entity == nil {
		return errors.New("the role entity is nil")
	}
	if entity.Name == "" {
		return errors.New("the name in the role entity is empty")
	}
	if _, err := c.meta.SelectRole(util.DefaultTenant, &milvuspb.RoleEntity{Name: entity.Name}, false); err != nil {
		log.Warn("fail to select the role", zap.Error(err))
		return errors.New("not found the role: " + entity.Name)
	}
	return nil
}

// isValidObject checks that the object entity is non-nil and names a known
// commonpb.ObjectType.
func (c *Core) isValidObject(entity *milvuspb.ObjectEntity) error {
	if entity == nil {
		return errors.New("the object entity is nil")
	}
	if _, ok := commonpb.ObjectType_value[entity.Name]; !ok {
		return fmt.Errorf("not found the object type[name: %s], supported the object types: %v",
			entity.Name, lo.Keys(commonpb.ObjectType_value))
	}
	return nil
}

// isValidGrantor checks the grantor entity: the granting user must exist and
// the privilege must be either the wildcard or valid for the given object type.
func (c *Core) isValidGrantor(entity *milvuspb.GrantorEntity, object string) error {
	if entity == nil {
		return errors.New("the grantor entity is nil")
	}
	if entity.User == nil {
		return errors.New("the user entity in the grantor entity is nil")
	}
	if entity.User.Name == "" {
		return errors.New("the name in the user entity of the grantor entity is empty")
	}
	if _, err := c.meta.SelectUser(util.DefaultTenant, &milvuspb.UserEntity{Name: entity.User.Name}, false); err != nil {
		log.Warn("fail to select the user", zap.Error(err))
		return errors.New("not found the user: " + entity.User.Name)
	}
	if entity.Privilege == nil {
		return errors.New("the privilege entity in the grantor entity is nil")
	}
	// The wildcard privilege is always acceptable.
	if util.IsAnyWord(entity.Privilege.Name) {
		return nil
	}
	if privilegeName := util.PrivilegeNameForMetastore(entity.Privilege.Name); privilegeName == "" {
		return fmt.Errorf("not found the privilege name[%s]", entity.Privilege.Name)
	}
	privileges, ok := util.ObjectPrivileges[object]
	if !ok {
		return fmt.Errorf("not found the object type[name: %s], supported the object types: %v",
			object, lo.Keys(commonpb.ObjectType_value))
	}
	for _, privilege := range privileges {
if privilege == entity.Privilege.Name {
			return nil
		}
	}
	return fmt.Errorf("not found the privilege name[%s]", entity.Privilege.Name)
}

// OperatePrivilege operate the privilege, including grant and revoke
// - check the node health
// - check if the operating type is valid
// - check if the entity is nil
// - check if the params, including the resource entity, the principal entity, the grantor entity, is valid
// - operate the privilege by the meta api
// - update the policy cache
func (c *Core) OperatePrivilege(ctx context.Context, in *milvuspb.OperatePrivilegeRequest) (*commonpb.Status, error) {
	method := "OperatePrivilege"
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc()
	tr := timerecord.NewTimeRecorder(method)
	logger.Debug(method, zap.Any("in", in))

	if code, ok := c.checkHealthy(); !ok {
		return errorutil.UnhealthyStatus(code), errorutil.UnhealthyError()
	}
	if in.Type != milvuspb.OperatePrivilegeType_Grant && in.Type != milvuspb.OperatePrivilegeType_Revoke {
		errMsg := fmt.Sprintf("invalid operate privilege type, current type: %s, valid value: [%s, %s]", in.Type, milvuspb.OperatePrivilegeType_Grant, milvuspb.OperatePrivilegeType_Revoke)
		log.Error(errMsg, zap.Any("in", in))
		return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, errMsg), nil
	}
	if in.Entity == nil {
		errMsg := "the grant entity in the request is nil"
		log.Error(errMsg, zap.Any("in", in))
		return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, errMsg), nil
	}
	// Validate object, role, and grantor before touching the metastore.
	if err := c.isValidObject(in.Entity.Object); err != nil {
		log.Error("", zap.Error(err))
		return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, err.Error()), nil
	}
	if err := c.isValidRole(in.Entity.Role); err != nil {
		log.Error("", zap.Error(err))
		return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, err.Error()), nil
	}
	if err := c.isValidGrantor(in.Entity.Grantor, in.Entity.Object.Name); err != nil {
		log.Error("", zap.Error(err))
		return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, err.Error()), nil
	}

	logger.Debug("before PrivilegeNameForMetastore", zap.String("privilege", in.Entity.Grantor.Privilege.Name))
	// Normalize the privilege name to its metastore form unless it is the wildcard.
	if !util.IsAnyWord(in.Entity.Grantor.Privilege.Name) {
		in.Entity.Grantor.Privilege.Name = util.PrivilegeNameForMetastore(in.Entity.Grantor.Privilege.Name)
	}
	logger.Debug("after PrivilegeNameForMetastore", zap.String("privilege", in.Entity.Grantor.Privilege.Name))
	// A Global-scope grant applies to every object name.
	if in.Entity.Object.Name == commonpb.ObjectType_Global.String() {
		in.Entity.ObjectName = util.AnyWord
	}

	// An ignorable error (privilege already in the desired state) still
	// succeeds, but the proxy caches are left untouched in that case.
	updateCache := true
	if err := c.meta.OperatePrivilege(util.DefaultTenant, in.Entity, in.Type); err != nil {
		if !common.IsIgnorableError(err) {
			errMsg := "fail to operate the privilege"
			log.Error(errMsg, zap.Any("in", in), zap.Error(err))
			return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, errMsg), nil
		}
		updateCache = false
	}

	if updateCache {
		// Map the RPC operate type onto the proxy-side cache operation.
		var opType int32
		switch in.Type {
		case milvuspb.OperatePrivilegeType_Grant:
			opType = int32(typeutil.CacheGrantPrivilege)
		case milvuspb.OperatePrivilegeType_Revoke:
			opType = int32(typeutil.CacheRevokePrivilege)
		default:
			errMsg := "invalid operate type for the OperatePrivilege api"
			log.Error(errMsg, zap.Any("in", in))
			return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, errMsg), nil
		}
		if err := c.proxyClientManager.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{
			OpType: opType,
			OpKey:  funcutil.PolicyForPrivilege(in.Entity.Role.Name, in.Entity.Object.Name, in.Entity.ObjectName, in.Entity.Grantor.Privilege.Name),
		}); err != nil {
			errMsg := "fail to refresh policy info cache"
			log.Error(errMsg, zap.Any("in", in), zap.Error(err))
			return failStatus(commonpb.ErrorCode_OperatePrivilegeFailure, errMsg), nil
		}
	}

	logger.Debug(method + " success")
	metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc()
	metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds()))
	return succStatus(), nil
}

// SelectGrant select grant
// - check the node health
// -
check if the principal entity is valid // - check if the resource entity which is provided by the user is valid // - select grant by the meta api func (c *Core) SelectGrant(ctx context.Context, in *milvuspb.SelectGrantRequest) (*milvuspb.SelectGrantResponse, error) { method := "SelectGrant" metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder(method) logger.Debug(method, zap.Any("in", in)) if code, ok := c.checkHealthy(); !ok { return &milvuspb.SelectGrantResponse{ Status: errorutil.UnhealthyStatus(code), }, errorutil.UnhealthyError() } if in.Entity == nil { errMsg := "the grant entity in the request is nil" log.Error(errMsg, zap.Any("in", in)) return &milvuspb.SelectGrantResponse{ Status: failStatus(commonpb.ErrorCode_SelectGrantFailure, errMsg), }, nil } if err := c.isValidRole(in.Entity.Role); err != nil { log.Error("", zap.Any("in", in), zap.Error(err)) return &milvuspb.SelectGrantResponse{ Status: failStatus(commonpb.ErrorCode_SelectGrantFailure, err.Error()), }, nil } if in.Entity.Object != nil { if err := c.isValidObject(in.Entity.Object); err != nil { log.Error("", zap.Any("in", in), zap.Error(err)) return &milvuspb.SelectGrantResponse{ Status: failStatus(commonpb.ErrorCode_SelectGrantFailure, err.Error()), }, nil } } grantEntities, err := c.meta.SelectGrant(util.DefaultTenant, in.Entity) if common.IsKeyNotExistError(err) { return &milvuspb.SelectGrantResponse{ Status: succStatus(), }, nil } if err != nil { errMsg := "fail to select the grant" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return &milvuspb.SelectGrantResponse{ Status: failStatus(commonpb.ErrorCode_SelectGrantFailure, errMsg), }, nil } logger.Debug(method + " success") metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds())) return &milvuspb.SelectGrantResponse{ Status: succStatus(), Entities: 
grantEntities, }, nil } func (c *Core) ListPolicy(ctx context.Context, in *internalpb.ListPolicyRequest) (*internalpb.ListPolicyResponse, error) { method := "PolicyList" metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.TotalLabel).Inc() tr := timerecord.NewTimeRecorder(method) logger.Debug(method, zap.Any("in", in)) if code, ok := c.checkHealthy(); !ok { return &internalpb.ListPolicyResponse{ Status: errorutil.UnhealthyStatus(code), }, errorutil.UnhealthyError() } policies, err := c.meta.ListPolicy(util.DefaultTenant) if err != nil { errMsg := "fail to list policy" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return &internalpb.ListPolicyResponse{ Status: failStatus(commonpb.ErrorCode_ListPolicyFailure, errMsg), }, nil } userRoles, err := c.meta.ListUserRole(util.DefaultTenant) if err != nil { errMsg := "fail to list user-role" log.Error(errMsg, zap.Any("in", in), zap.Error(err)) return &internalpb.ListPolicyResponse{ Status: failStatus(commonpb.ErrorCode_ListPolicyFailure, "fail to list user-role"), }, nil } logger.Debug(method + " success") metrics.RootCoordDDLReqCounter.WithLabelValues(method, metrics.SuccessLabel).Inc() metrics.RootCoordDDLReqLatency.WithLabelValues(method).Observe(float64(tr.ElapseSpan().Milliseconds())) return &internalpb.ListPolicyResponse{ Status: succStatus(), PolicyInfos: policies, UserRoles: userRoles, }, nil } func (c *Core) CheckHealth(ctx context.Context, in *milvuspb.CheckHealthRequest) (*milvuspb.CheckHealthResponse, error) { if _, ok := c.checkHealthy(); !ok { reason := errorutil.UnHealthReason("rootcoord", c.session.ServerID, "rootcoord is unhealthy") return &milvuspb.CheckHealthResponse{IsHealthy: false, Reasons: []string{reason}}, nil } mu := &sync.Mutex{} group, ctx := errgroup.WithContext(ctx) errReasons := make([]string, 0, len(c.proxyClientManager.proxyClient)) for nodeID, proxyClient := range c.proxyClientManager.proxyClient { nodeID := nodeID proxyClient := proxyClient group.Go(func() error { sta, err 
:= proxyClient.GetComponentStates(ctx) isHealthy, reason := errorutil.UnHealthReasonWithComponentStatesOrErr("proxy", nodeID, sta, err) if !isHealthy { mu.Lock() defer mu.Unlock() errReasons = append(errReasons, reason) } return err }) } err := group.Wait() if err != nil || len(errReasons) != 0 { return &milvuspb.CheckHealthResponse{IsHealthy: false, Reasons: errReasons}, nil } return &milvuspb.CheckHealthResponse{IsHealthy: true, Reasons: errReasons}, nil }