# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Related configuration of etcd, used to store Milvus metadata and for service discovery.
etcd:
  endpoints: localhost:2379
  rootPath: by-dev # The root path where data is stored in etcd
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
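  # For illustration, with the defaults above: metadata is stored under the etcd
  # key prefix `by-dev/meta`, and key-value data under `by-dev/kv`.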
  log:
    level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
    # path is one of:
    #  - "default" as os.Stderr,
    #  - "stderr" as os.Stderr,
    #  - "stdout" as os.Stdout,
    #  - file path to append server logs to.
    # please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log
    path: stdout
  ssl:
    enabled: false # Whether to support ETCD secure connection mode
    tlsCert: /path/to/etcd-client.pem # path to your cert file
    tlsKey: /path/to/etcd-client-key.pem # path to your key file
    tlsCACert: /path/to/ca.pem # path to your CACert file
    # TLS min version
    # Optional values: 1.0, 1.1, 1.2, 1.3.
    # We recommend using version 1.2 and above.
    tlsMinVersion: 1.3
  use:
    embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
  data:
    dir: default.etcd # Embedded Etcd only. please adjust in embedded Milvus: /tmp/milvus/etcdData/

metastore:
  # Default value: etcd
  # Valid values: etcd
  type: etcd

localStorage:
  path: /var/lib/milvus/data/ # please adjust in embedded Milvus: /tmp/milvus/data/

# Related configuration of MinIO/S3/GCS or any other service that supports the S3 API,
# which is responsible for data persistence for Milvus.
# We refer to the storage service as MinIO/S3 in the following description for simplicity.
minio:
  address: localhost # Address of MinIO/S3
  port: 9000 # Port of MinIO/S3
  accessKeyID: minioadmin # accessKeyID of MinIO/S3
  secretAccessKey: minioadmin # MinIO/S3 encryption string
  useSSL: false # Access to MinIO/S3 with SSL
  bucketName: a-bucket # Bucket name in MinIO/S3
  rootPath: files # The root path where the message is stored in MinIO/S3
  # Whether to use an IAM role to access S3/GCS instead of access/secret keys
  # For more information, refer to
  # aws: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
  # gcp: https://cloud.google.com/storage/docs/access-control/iam
  # aliyun (ack): https://www.alibabacloud.com/help/en/container-service-for-kubernetes/latest/use-rrsa-to-enforce-access-control
  # aliyun (ecs): https://www.alibabacloud.com/help/en/elastic-compute-service/latest/attach-an-instance-ram-role
  useIAM: false
  # Cloud provider of S3. Supports: "aws", "gcp", "aliyun".
  # You can use "aws" for any other cloud provider that supports the S3 API with signature v4, e.g. MinIO.
  # You can use "gcp" for any other cloud provider that supports the S3 API with signature v2.
  # You can use "aliyun" for any other cloud provider that uses virtual-host-style buckets.
  # When useIAM is enabled, only "aws", "gcp" and "aliyun" are supported for now.
  cloudProvider: aws
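  # Illustrative sketch (values are placeholders, not defaults): to rely on an
  # attached IAM role on AWS instead of static keys, the settings above would be
  # combined roughly as:
  #   useIAM: true
  #   cloudProvider: aws
  #   accessKeyID:       # left empty, credentials come from the role
  #   secretAccessKey:   # left empty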
  # Custom endpoint for fetching IAM role credentials, used when useIAM is true and cloudProvider is "aws".
  # Leave it empty if you want to use the AWS default endpoint.
  iamEndpoint:
  # Log level for aws sdk log.
  # Supported level: off, fatal, error, warn, info, debug, trace
  logLevel: fatal
  # Cloud data center region
  region: ""
  # Whether to use virtual host bucket mode for the cloud
  useVirtualHost: false

# Milvus supports four MQs: rocksmq (based on RocksDB), natsmq (embedded nats-server), Pulsar and Kafka.
# You can change your MQ by setting the mq.type field.
# If you leave mq.type as default, the following priority applies when multiple MQs are configured in this file:
# 1. standalone (local) mode: rocksmq (default) > Pulsar > Kafka
# 2. cluster mode: Pulsar (default) > Kafka (rocksmq and natsmq are unsupported in cluster mode)
mq:
  # Default value: "default"
  # Valid values: [default, pulsar, kafka, rocksmq, natsmq]
  type: default
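  # Illustrative example: to force Kafka in cluster mode, set `type: kafka` here
  # and fill in the commented-out kafka section below.
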
# Related configuration of pulsar, used to manage Milvus logs of recent mutation operations, output streaming log, and provide log publish-subscribe services.
pulsar:
  address: localhost # Address of pulsar
  port: 6650 # Port of Pulsar
  webport: 80 # Web port of pulsar; if you connect directly without a proxy, use 8080
  maxMessageSize: 5242880 # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.
  tenant: public
  namespace: default
  requestTimeout: 60 # pulsar client global request timeout in seconds

# If you want to enable kafka, you need to comment out the pulsar configs
# kafka:
#   brokerList:
#   saslUsername:
#   saslPassword:
#   saslMechanisms: PLAIN
#   securityProtocol: SASL_SSL

rocksmq:
  # The path where the message is stored in rocksmq
  # please adjust in embedded Milvus: /tmp/milvus/rdb_data
  path: /var/lib/milvus/rdb_data
  lrucacheratio: 0.06 # rocksdb cache memory ratio
  rocksmqPageSize: 67108864 # 64 MB, 64 * 1024 * 1024 bytes, The size of each page of messages in rocksmq
  retentionTimeInMinutes: 4320 # 3 days, 3 * 24 * 60 minutes, The retention time of the message in rocksmq.
  retentionSizeInMB: 8192 # 8 GB, 8 * 1024 MB, The retention size of the message in rocksmq.
  compactionInterval: 86400 # 1 day, trigger rocksdb compaction every day to remove deleted data
  # Compaction compression type; only 0 and 7 are supported.
  # 0 means no compression, 7 uses zstd.
  # The length of the list equals the number of rocksdb levels.
  compressionTypes: [0, 0, 7, 7, 7]
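  # For illustration: the default [0, 0, 7, 7, 7] leaves rocksdb levels 0-1
  # uncompressed and compresses levels 2-4 with zstd.
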
# natsmq configuration.
# more detail: https://docs.nats.io/running-a-nats-service/configuration
natsmq:
  server: # server side configuration for natsmq.
    port: 4222 # 4222 by default, Port for nats server listening.
    storeDir: /var/lib/milvus/nats # /var/lib/milvus/nats by default, directory to use for JetStream storage of nats.
    maxFileStore: 17179869184 # (B) 16GB by default, Maximum size of the 'file' storage.
    maxPayload: 8388608 # (B) 8MB by default, Maximum number of bytes in a message payload.
    maxPending: 67108864 # (B) 64MB by default, Maximum number of bytes buffered for a connection; applies to client connections.
    initializeTimeout: 4000 # (ms) 4s by default, timeout to wait for natsmq initialization to finish.
    monitor:
      trace: false # false by default, If true enable protocol trace log messages.
      debug: false # false by default, If true enable debug log messages.
      logTime: true # true by default, If set to false, log without timestamps.
      logFile: /tmp/milvus/logs/nats.log # /tmp/milvus/logs/nats.log by default; a relative path is resolved against the parent directory of the milvus binary.
      logSizeLimit: 536870912 # (B) 512MB by default, Size in bytes after which the log file rolls over to a new one.
    retention:
      maxAge: 4320 # (min) 3 days by default, Maximum age of any message in the P-channel.
      maxBytes: # (B) None by default, How many bytes the single P-channel may contain. Removes oldest messages if the P-channel exceeds this size.
      maxMsgs: # None by default, How many messages the single P-channel may contain. Removes oldest messages if the P-channel exceeds this limit.

# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests
rootCoord:
  dmlChannelNum: 16 # The number of dml channels created at system startup
  maxDatabaseNum: 64 # Maximum number of databases
  maxPartitionNum: 4096 # Maximum number of partitions in a collection
  minSegmentSizeToEnableIndex: 1024 # It's a threshold. When the segment size is less than this value, the segment will not be indexed
  importTaskExpiration: 900 # (in seconds) Duration after which an import task will expire (be killed). Default 900 seconds (15 minutes).
  importTaskRetention: 86400 # (in seconds) Milvus will keep the record of import tasks for at least `importTaskRetention` seconds. Default 86400 seconds (24 hours).
  enableActiveStandby: false
  port: 53100
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 536870912
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 268435456

# Related configuration of proxy, used to validate client requests and reduce the returned results.
proxy:
  timeTickInterval: 200 # ms, the interval at which the proxy synchronizes the time tick
  healthCheckTimetout: 3000 # ms, the timeout of the component health check
  msgStream:
    timeTick:
      bufSize: 512
  maxNameLength: 255 # Maximum length of name for a collection or alias
  # Maximum number of fields in a collection.
  # As of today (2.2.0 and after) it is strongly DISCOURAGED to set maxFieldNum >= 64.
  # So adjust at your risk!
  maxFieldNum: 64
  maxShardNum: 16 # Maximum number of shards in a collection
  maxDimension: 32768 # Maximum dimension of a vector
  # Whether to produce gin logs.
  # please adjust in embedded Milvus: false
  ginLogging: true
  maxTaskNum: 1024 # max task number of proxy task queue
  accessLog:
    localPath: /tmp/milvus_accesslog
    filename: milvus_access_log.log # Log filename, leave empty to disable file log.
  http:
    enabled: true # Whether to enable the http server
    debug_mode: false # Whether to enable http server debug mode
  port: 19530
  internalPort: 19529
  grpc:
    serverMaxSendSize: 67108864
    serverMaxRecvSize: 67108864
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 268435456

# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
queryCoord:
  autoHandoff: true # Enable auto handoff
  autoBalance: true # Enable auto balance
  balancer: ScoreBasedBalancer # Balancer to use
  globalRowCountFactor: 0.1 # expert parameter, only used by scoreBasedBalancer
  scoreUnbalanceTolerationFactor: 0.05 # expert parameter, only used by scoreBasedBalancer
  reverseUnBalanceTolerationFactor: 1.3 # expert parameter, only used by scoreBasedBalancer
  overloadedMemoryThresholdPercentage: 90 # The threshold percentage at which memory is considered overloaded
  balanceIntervalSeconds: 60
  memoryUsageMaxDifferencePercentage: 30
  checkInterval: 1000
  channelTaskTimeout: 60000 # 1 minute
  segmentTaskTimeout: 120000 # 2 minutes
  distPullInterval: 500
  heartbeatAvailableInterval: 10000 # 10s, Only QueryNodes which fetched heartbeats within the duration are available
  loadTimeoutSeconds: 600
  checkHandoffInterval: 5000
  port: 19531
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 536870912
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 268435456
  taskMergeCap: 1
  taskExecutionCap: 256
  enableActiveStandby: false # Enable active-standby
  brokerTimeout: 5000 # broker rpc timeout in milliseconds

# Related configuration of queryNode, used to run hybrid search between vector and scalar data.
queryNode:
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  stats:
    publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
  segcore:
    cgoPoolSizeRatio: 2.0 # cgo pool size ratio to max read concurrency
    knowhereThreadPoolNumRatio: 4
    # Use more threads to make better use of SSD throughput in disk index.
    # This parameter is only useful when enable-disk = true.
    # And this value should be a number greater than 1 and less than 32.
    chunkRows: 1024 # The number of vectors in a chunk.
    growing: # build a vector index for growing segments to accelerate search
      enableIndex: true
      nlist: 128 # growing segment index nlist
      nprobe: 16 # nprobe for searching growing segments; based on your accuracy requirement, must be smaller than nlist
  loadMemoryUsageFactor: 1 # The multiply factor of calculating the memory usage while loading segments
  enableDisk: false # enable querynode load disk index, and search on disk index
  maxDiskUsagePercentage: 95
  cache:
    enabled: true
    memoryLimit: 2147483648 # 2 GB, 2 * 1024 * 1024 * 1024
  grouping:
    enabled: true
    maxNQ: 1000
    topKMergeRatio: 20
  scheduler:
    receiveChanSize: 10240
    unsolvedQueueSize: 10240
    # maxReadConcurrentRatio is the concurrency ratio of read tasks (search tasks and query tasks).
    # Max read concurrency would be the value of runtime.NumCPU * maxReadConcurrentRatio.
    # It defaults to 2.0, which means max read concurrency would be the value of runtime.NumCPU * 2.
    # Max read concurrency must be greater than or equal to 1, and less than or equal to runtime.NumCPU * 100.
    # (0, 100]
    maxReadConcurrentRatio: 1
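    # For illustration: on a 16-core machine, maxReadConcurrentRatio: 1 caps
    # concurrent read tasks at 16, while the 2.0 default mentioned above would allow 32.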
    cpuRatio: 10 # ratio used to estimate read task cpu usage.
    maxTimestampLag: 86400
    # read task schedule policy: fifo (by default), user-task-polling.
    scheduleReadPolicy:
      # fifo: A FIFO queue supports the schedule.
      # user-task-polling:
      #     The user's tasks will be polled one by one and scheduled.
      #     Scheduling is fair on task granularity.
      #     The policy is based on the username for authentication,
      #     and an empty username is considered the same user.
      #     When there are no multiple users, the policy decays into FIFO.
      name: fifo
      maxPendingTask: 10240
      # user-task-polling configuration:
      taskQueueExpire: 60 # 1 min by default, expire time of an inner user task queue once the queue is empty.
      enableCrossUserGrouping: false # false by default; enable cross-user grouping when using the user-task-polling policy (disable it if tasks of any user cannot merge with others).
      maxPendingTaskPerUser: 1024 # max pending tasks in the scheduler per user.
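      # Illustrative sketch: to schedule fairly across authenticated users rather
      # than FIFO, the block above could be changed to:
      #   name: user-task-polling
      #   taskQueueExpire: 60
      #   enableCrossUserGrouping: false
      #   maxPendingTaskPerUser: 1024
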
  port: 21123
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 536870912
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 268435456

indexCoord:
  bindIndexNodeMode:
    enable: false
    address: localhost:22930
    withCred: false
    nodeID: 0
  segment:
    minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the segment num rows is less than this value, the segment will not be indexed

indexNode:
  scheduler:
    buildParallel: 1
  enableDisk: true # enable index node to build disk vector index
  maxDiskUsagePercentage: 95
  port: 21121
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 536870912
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 268435456

dataCoord:
  channel:
    watchTimeoutInterval: 300 # Timeout on watching channels (in seconds). The datanode tickler's watch-progress updates will reset the timeout timer.
    balanceSilentDuration: 300 # The duration before the channelBalancer on datacoord runs
    balanceInterval: 360 # The interval for the channelBalancer on datacoord to check balance status
  segment:
    maxSize: 512 # Maximum size of a segment in MB
    diskSegmentMaxSize: 2048 # Maximum size of a segment in MB for collections with a disk index
    sealProportion: 0.23
    # The time of the assignment expiration in ms
    # Warning! this parameter is an expert variable and closely related to data integrity. Without a specific
    # target and solid understanding of the scenarios, it should not be changed. If it's necessary to alter
    # this parameter, make sure that the newly changed value is larger than the previous value used before restart,
    # otherwise there could be a large possibility of data loss.
    assignmentExpiration: 2000
    maxLife: 86400 # The max lifetime of a segment in seconds, 24*60*60
    # If a segment didn't accept dml records in maxIdleTime and the size of the segment is greater than
    # minSizeFromIdleToSealed, Milvus will automatically seal it.
    # The max idle time of a segment in seconds, 10*60.
    maxIdleTime: 600
    minSizeFromIdleToSealed: 16 # The min size in MB of a segment which can be sealed after being idle.
    # The max number of binlog files for one segment; the segment will be sealed if
    # the number of binlog files reaches the max value.
    maxBinlogFileNumber: 32
    smallProportion: 0.5 # The segment is considered a "small segment" when its # of rows is smaller than
    # (smallProportion * segment max # of rows).
    # A compaction will happen on small segments if the segment after compaction will have
    compactableProportion: 0.85
    # over (compactableProportion * segment max # of rows) rows.
    # MUST BE GREATER THAN OR EQUAL TO <smallProportion>!!!
    # During compaction, the # of rows of a segment is allowed to exceed segment max # of rows by (expansionRate-1) * 100%.
    expansionRate: 1.25
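    # For illustration: if maxSize 512 MB corresponds to N max rows per segment,
    # the defaults above treat a segment as "small" below 0.5*N rows, compact small
    # segments when the merged result would exceed 0.85*N rows, and allow the
    # merged segment to grow up to 1.25*N rows during compaction.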
  enableCompaction: true # Enable data segment compaction
  compaction:
    enableAutoCompaction: true
    rpcTimeout: 10 # compaction rpc request timeout in seconds
    maxParallelTaskNum: 100 # max parallel compaction task number
    indexBasedCompaction: true

  enableGarbageCollection: true
  gc:
    interval: 3600 # gc interval in seconds
    missingTolerance: 3600 # file meta missing tolerance duration in seconds, 3600
    dropTolerance: 10800 # file belongs to dropped entity tolerance duration in seconds. 10800
  enableActiveStandby: false
  port: 13333
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 536870912
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 268435456

dataNode:
  dataSync:
    flowGraph:
      maxQueueLength: 16 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
    maxParallelSyncTaskNum: 6 # Maximum number of sync tasks executed in parallel in each flush manager
  segment:
    insertBufSize: 16777216 # Max buffer size to flush for a single segment.
    deleteBufBytes: 67108864 # Max buffer size to flush del for a single channel
    syncPeriod: 600 # The period to sync segments if buffer is not empty.
  port: 21124
  grpc:
    serverMaxSendSize: 536870912
    serverMaxRecvSize: 536870912
    clientMaxSendSize: 268435456
    clientMaxRecvSize: 268435456
  memory:
    forceSyncEnable: true # `true` to force sync if memory usage is too high
    forceSyncSegmentNum: 1 # number of segments to sync; segments with the largest buffers will be synced.
    watermarkStandalone: 0.2 # memory watermark for standalone; upon reaching this watermark, segments will be synced.
    watermarkCluster: 0.5 # memory watermark for cluster; upon reaching this watermark, segments will be synced.
  timetick:
    byRPC: true

# Configures the system log output.
log:
  level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
  file:
    rootPath: # root dir path to put logs; default "" means no log file will be written. please adjust in embedded Milvus: /tmp/milvus/logs
    maxSize: 300 # MB
    maxAge: 10 # Maximum time for log retention in days.
    maxBackups: 20
  format: text # text or json
  stdout: true # Stdout enable or not

grpc:
  log:
    level: WARNING
  serverMaxSendSize: 536870912
  serverMaxRecvSize: 536870912
  client:
    compressionEnabled: false
    dialTimeout: 200
    keepAliveTime: 10000
    keepAliveTimeout: 20000
    maxMaxAttempts: 5
    initialBackoff: 1
    maxBackoff: 10
    backoffMultiplier: 2
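    # For illustration (assuming the backoff values above are in seconds): a failing
    # call retries with waits of 1, 2, 4 and 8, each capped at maxBackoff (10),
    # for at most maxMaxAttempts (5) attempts.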
  clientMaxSendSize: 268435456
  clientMaxRecvSize: 268435456

# Configure the proxy tls enable.
tls:
  serverPemPath: configs/cert/server.pem
  serverKeyPath: configs/cert/server.key
  caPemPath: configs/cert/ca.pem

common:
  chanNamePrefix:
    cluster: by-dev
    rootCoordTimeTick: rootcoord-timetick
    rootCoordStatistics: rootcoord-statistics
    rootCoordDml: rootcoord-dml
    rootCoordDelta: rootcoord-delta
    search: search
    searchResult: searchResult
    queryTimeTick: queryTimeTick
    dataCoordStatistic: datacoord-statistics-channel
    dataCoordTimeTick: datacoord-timetick-channel
    dataCoordSegmentInfo: segment-info-channel
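    # For illustration (the exact pattern is an assumption): physical channel names
    # are derived from the cluster prefix plus these names, e.g. a dml channel
    # would look like `by-dev-rootcoord-dml_0`.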
  subNamePrefix:
    proxySubNamePrefix: proxy
    rootCoordSubNamePrefix: rootCoord
    queryNodeSubNamePrefix: queryNode
    dataCoordSubNamePrefix: dataCoord
    dataNodeSubNamePrefix: dataNode
  defaultPartitionName: _default # default partition name for a collection
  defaultIndexName: _default_idx # default index name
  retentionDuration: 0 # time travel reserved time; inserts/deletes will not be cleaned during this period. disabled by default
  entityExpiration: -1 # Entity expiration in seconds. CAUTION: make sure entityExpiration >= retentionDuration; -1 means never expire
  indexSliceSize: 16 # MB
  threadCoreCoefficient:
    highPriority: 10 # This parameter specifies how many times the number of threads is the number of cores in the high priority thread pool
    middlePriority: 5 # This parameter specifies how many times the number of threads is the number of cores in the middle priority thread pool
    lowPriority: 1 # This parameter specifies how many times the number of threads is the number of cores in the low priority thread pool
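    # For illustration: on an 8-core machine, these defaults yield thread pools of
    # roughly 80 (high), 40 (middle) and 8 (low) threads.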
  DiskIndex:
    MaxDegree: 56
    SearchListSize: 100
    PQCodeBudgetGBRatio: 0.125
    BuildNumThreadsRatio: 1
    SearchCacheBudgetGBRatio: 0.1
    LoadNumThreadRatio: 8
    BeamWidthRatio: 4
  gracefulTime: 5000 # milliseconds. it represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency.
  gracefulStopTimeout: 1800 # seconds. it will force quit the server if the graceful stop process is not completed during this time.
  storageType: minio # please adjust in embedded Milvus: local
  # Default value: auto
  # Valid values: [auto, avx512, avx2, avx, sse4_2]
  # This configuration is only used by querynode and indexnode; it selects the CPU instruction set for searching and index-building.
  simdType: auto
  security:
    authorizationEnabled: false
    # The superusers will ignore some system check processes,
    # like the old password verification when updating the credential
    # superUsers: root
    tlsMode: 0
  session:
    ttl: 60 # ttl value when session granting a lease to register service
    retryTimes: 30 # retry times when session sending etcd requests

  # preCreatedTopic decides whether to use pre-created (existing) topics.
  preCreatedTopic:
    enabled: false
    # support pre-created topics
    # the names of the pre-created topics
    names: ["topic1", "topic2"]
    # a separate topic needs to be set to stand for the currently consumed timestamp of each channel
    timeticker: "timetick-channel"

  # max file size to import for bulkInsert
  ImportMaxFileSize: 17179869184 # 16 * 1024 * 1024 * 1024

locks:
  metrics:
    enable: false
  threshold:
    info: 500 # minimum milliseconds for printing durations at info level
    warn: 1000 # minimum milliseconds for printing durations at warn level

# QuotaConfig, configurations of Milvus quota and limits.
# By default, we enable:
#   1. TT protection;
#   2. Memory protection;
#   3. Disk quota protection.
# You can enable:
#   1. DML throughput limitation;
#   2. DDL, DQL qps/rps limitation;
#   3. DQL queue length/latency protection;
#   4. DQL result rate protection.
# If necessary, you can also manually force to deny RW requests.
quotaAndLimits:
  enabled: true # `true` to enable quota and limits, `false` to disable.
  limits:
    maxCollectionNum: 65536
    maxCollectionNumPerDB: 65536
  # quotaCenterCollectInterval is the time interval that quotaCenter
  # collects metrics from Proxies, Query cluster and Data cluster.
  # seconds, (0 ~ 65536)
  quotaCenterCollectInterval: 3
  ddl:
    enabled: false
    collectionRate: -1 # qps, default no limit, rate for CreateCollection, DropCollection, LoadCollection, ReleaseCollection
    partitionRate: -1 # qps, default no limit, rate for CreatePartition, DropPartition, LoadPartition, ReleasePartition
  indexRate:
    enabled: false
    max: -1 # qps, default no limit, rate for CreateIndex, DropIndex
  flushRate:
    enabled: false
    max: -1 # qps, default no limit, rate for flush
  compactionRate:
    enabled: false
    max: -1 # qps, default no limit, rate for manualCompaction
  dml:
    # dml limit rates, default no limit.
    # The maximum rate will not be greater than max.
    enabled: false
    insertRate:
      collection:
        max: -1 # MB/s, default no limit
      max: -1 # MB/s, default no limit
    upsertRate:
      collection:
        max: -1 # MB/s, default no limit
      max: -1 # MB/s, default no limit
    deleteRate:
      collection:
        max: -1 # MB/s, default no limit
      max: -1 # MB/s, default no limit
    bulkLoadRate:
      collection:
        max: -1 # MB/s, default no limit, not supported yet. TODO: limit bulkLoad rate
      max: -1 # MB/s, default no limit, not supported yet. TODO: limit bulkLoad rate
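    # Illustrative sketch (values are examples, not defaults): to cap inserts at
    # 64 MB/s per collection and 256 MB/s cluster-wide, set dml.enabled: true and:
    #   insertRate:
    #     collection:
    #       max: 64
    #     max: 256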
  dql:
    # dql limit rates, default no limit.
    # The maximum rate will not be greater than max.
    enabled: false
    searchRate:
      collection:
        max: -1 # vps (vectors per second), default no limit
      max: -1 # vps (vectors per second), default no limit
    queryRate:
      collection:
        max: -1 # qps, default no limit
      max: -1 # qps, default no limit
  limitWriting:
    # forceDeny false means dml requests are allowed (except under some
    # specific conditions, such as node memory reaching the water marker); true means always reject all dml requests.
    forceDeny: false
    ttProtection:
      enabled: false
      # maxTimeTickDelay indicates the backpressure for DML Operations.
      # DML rates would be reduced according to the ratio of time tick delay to maxTimeTickDelay;
      # if the time tick delay is greater than maxTimeTickDelay, all DML requests would be rejected.
      # seconds
      maxTimeTickDelay: 300
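      # For illustration: with maxTimeTickDelay: 300, a time tick delay of 150s
      # would roughly halve DML rates; once the delay exceeds 300s, all DML
      # requests are rejected.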
    memProtection:
      # When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
      # When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
      # When memory usage < memoryLowWaterLevel, no action.
      enabled: true
      dataNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in DataNodes
      dataNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in DataNodes
      queryNodeMemoryLowWaterLevel: 0.85 # (0, 1], memoryLowWaterLevel in QueryNodes
      queryNodeMemoryHighWaterLevel: 0.95 # (0, 1], memoryHighWaterLevel in QueryNodes
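      # For illustration: at 90% QueryNode memory usage, usage falls between the
      # 0.85 low and 0.95 high water levels above, so dml rates are reduced;
      # above 95%, all dml requests are rejected.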
    growingSegmentsSizeProtection:
      # No action will be taken if the growing segments size is less than the low watermark.
      # When the growing segments size exceeds the low watermark, the dml rate will be reduced,
      # but the rate will not be lower than `minRateRatio * dmlRate`.
      enabled: false
      minRateRatio: 0.5
      lowWaterLevel: 0.2
      highWaterLevel: 0.4
    diskProtection:
      enabled: true # When the total file size of object storage is greater than `diskQuota`, all dml requests would be rejected;
      diskQuota: -1 # MB, (0, +inf), default no limit
      diskQuotaPerCollection: -1 # MB, (0, +inf), default no limit
  limitReading:
    # forceDeny false means dql requests are allowed (except under some
    # specific conditions, such as the collection having been dropped); true means always reject all dql requests.
    forceDeny: false
    queueProtection:
      enabled: false
      # nqInQueueThreshold indicates that the system is under backpressure for the Search/Query path.
      # If the NQ in any QueryNode's queue is greater than nqInQueueThreshold, search & query rates would gradually cool off
      # until the NQ in the queue no longer exceeds nqInQueueThreshold. We count the NQ of a query request as 1.
      # int, default no limit
      nqInQueueThreshold: -1
      # queueLatencyThreshold indicates that the system is under backpressure for the Search/Query path.
      # If the dql latency of queuing is greater than queueLatencyThreshold, search & query rates would gradually cool off
      # until the latency of queuing no longer exceeds queueLatencyThreshold.
      # The latency here refers to the averaged latency over a period of time.
      # milliseconds, default no limit
      queueLatencyThreshold: -1
    resultProtection:
      enabled: false
      # maxReadResultRate indicates that the system is under backpressure for the Search/Query path.
      # If the dql result rate is greater than maxReadResultRate, search & query rates would gradually cool off
      # until the read result rate no longer exceeds maxReadResultRate.
      # MB/s, default no limit
      maxReadResultRate: -1
    # coolOffSpeed is the speed at which search & query rates cool off.
    # (0, 1]
    coolOffSpeed: 0.9
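    # For illustration (cool-off mechanics as described above): each cool-off step
    # scales the current search & query rate by coolOffSpeed, so a 100 qps limit
    # would step down to 90, 81, ... until the protected metric recovers.
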
trace:
  # trace exporter type, default is stdout,
  # optional values: ['stdout', 'jaeger']
  exporter: stdout
  # fraction for the traceID-based sampler,
  # optional values: [0, 1]
  # Fractions >= 1 will always sample. Fractions < 0 are treated as zero.
  sampleFraction: 0
  jaeger:
    url: # "http://127.0.0.1:14268/api/traces"
    # when the exporter is jaeger, the jaeger URL should be set
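  # Illustrative sketch: to ship traces to a local Jaeger collector, the section
  # above could become:
  #   exporter: jaeger
  #   sampleFraction: 1
  #   jaeger:
  #     url: "http://127.0.0.1:14268/api/traces"
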
autoIndex:
  params:
    build: '{"M": 18,"efConstruction": 240,"index_type": "HNSW", "metric_type": "IP"}'