diff --git a/.clang-tidy b/.clang-tidy index a6b83c9af0..8cf39f2b2e 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -18,7 +18,7 @@ # The checks defined here will be run and will display by default as warnings. Checks: > -*, clang-diagnostic-*, -clang-diagnostic-error, - clang-analyzer-*, -clang-analyzer-alpha*, + clang-analyzer-*, google-*, -google-runtime-references, -google-readability-todo, modernize-*, -modernize-use-trailing-return-type, -modernize-use-nodiscard, performance-*, diff --git a/configs/milvus.yaml b/configs/milvus.yaml index f77e1c75b0..265fdadd1a 100644 --- a/configs/milvus.yaml +++ b/configs/milvus.yaml @@ -453,9 +453,9 @@ common: entityExpiration: -1 # Entity expiration in seconds, CAUTION make sure entityExpiration >= retentionDuration and -1 means never expire indexSliceSize: 16 # MB threadCoreCoefficient: - highPriority: 100 # This parameter specify how many times the number of threads is the number of cores in high priority thread pool - middlePriority: 50 # This parameter specify how many times the number of threads is the number of cores in middle priority thread pool - lowPriority: 10 # This parameter specify how many times the number of threads is the number of cores in low priority thread pool + highPriority: 10 # This parameter specifies how many times the number of threads is the number of cores in high priority thread pool + middlePriority: 5 # This parameter specifies how many times the number of threads is the number of cores in middle priority thread pool + lowPriority: 1 # This parameter specifies how many times the number of threads is the number of cores in low priority thread pool DiskIndex: MaxDegree: 56 SearchListSize: 100 diff --git a/internal/querynodev2/segments/segment_loader.go b/internal/querynodev2/segments/segment_loader.go index 2c08e01e0d..05c6e344e8 100644 --- a/internal/querynodev2/segments/segment_loader.go +++ b/internal/querynodev2/segments/segment_loader.go @@ -301,6 +301,9 @@ func (loader *segmentLoader)
notifyLoadFinish(segments ...*querypb.SegmentLoadIn func (loader *segmentLoader) requestResource(ctx context.Context, infos ...*querypb.SegmentLoadInfo) (LoadResource, int, error) { resource := LoadResource{} + loader.mut.Lock() + defer loader.mut.Unlock() + memoryUsage := hardware.GetUsedMemoryCount() totalMemory := hardware.GetMemoryCount() @@ -310,9 +313,6 @@ func (loader *segmentLoader) requestResource(ctx context.Context, infos ...*quer } diskCap := paramtable.Get().QueryNodeCfg.DiskCapacityLimit.GetAsUint64() - loader.mut.Lock() - defer loader.mut.Unlock() - poolCap := runtime.NumCPU() * paramtable.Get().CommonCfg.HighPriorityThreadCoreCoefficient.GetAsInt() if loader.committedResource.WorkNum >= poolCap { return resource, 0, merr.WrapErrServiceRequestLimitExceeded(int32(poolCap))