enhance: [ci] organize values file in the form of milvus deployment o… (#35839)

https://github.com/milvus-io/milvus/issues/35842
https://github.com/milvus-io/milvus/pull/35832

Signed-off-by: Yellow Shine <sammy.huang@zilliz.com>
yellow-shine 2024-08-30 13:07:15 +08:00 committed by GitHub
parent 41582a90ad
commit 8cd75d627c
14 changed files with 1628 additions and 7 deletions

@@ -1,4 +1,4 @@
@Library('jenkins-shared-library@v0.34.0') _
@Library('jenkins-shared-library@v0.40.0') _
def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml'
@@ -57,10 +57,11 @@ pipeline {
gitBaseRef: gitBaseRef,
pullRequestNumber: "$env.CHANGE_ID",
suppress_suffix_of_image_tag: true,
test_client_type: '["pytest"]'
images: '["milvus","pytest","helm"]'
milvus_image_tag = tekton.query_result job_name, 'milvus-image-tag'
pytest_image = tekton.query_result job_name, 'pytest-image-fqdn'
helm_image = tekton.query_result job_name, 'helm-image-fqdn'
}
}
}
@@ -100,7 +101,9 @@ pipeline {
ciMode: 'nightly',
milvus_image_tag: milvus_image_tag,
pytest_image: pytest_image,
milvus_deployment_option: milvus_deployment_option
helm_image: helm_image,
milvus_deployment_option: milvus_deployment_option,
verbose: 'false'
}
}
}
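Taken together, the hunks above do three things in the nightly pipeline: the Tekton build step now also produces a helm image (the images list replaces the old test_client_type parameter), its fully qualified name is read back with tekton.query_result job_name, 'helm-image-fqdn', and that name is forwarded to the test stage as helm_image. A condensed Groovy sketch of the wiring; tekton.build and tekton.e2e are placeholder names for the shared-library entry points that the hunks do not show:

// Condensed sketch only -- tekton.build and tekton.e2e are hypothetical names;
// the real steps live in jenkins-shared-library@v0.40.0.
def job_name = tekton.build(
    gitBaseRef: gitBaseRef,
    pullRequestNumber: "$env.CHANGE_ID",
    suppress_suffix_of_image_tag: true,
    images: '["milvus","pytest","helm"]'            // "helm" added by this commit
)

// Read back the image references produced by the build job.
milvus_image_tag = tekton.query_result job_name, 'milvus-image-tag'
pytest_image     = tekton.query_result job_name, 'pytest-image-fqdn'
helm_image       = tekton.query_result job_name, 'helm-image-fqdn'     // new

// Forward the helm image to the test stage next to the existing parameters.
tekton.e2e(
    ciMode: 'nightly',
    milvus_image_tag: milvus_image_tag,
    pytest_image: pytest_image,
    helm_image: helm_image,                          // new
    milvus_deployment_option: milvus_deployment_option,
    verbose: 'false'
)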

@@ -1,4 +1,4 @@
@Library('jenkins-shared-library@v0.34.0') _
@Library('jenkins-shared-library@v0.40.0') _
def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml'
def milvus_helm_chart_version = '4.2.8'
@@ -43,10 +43,11 @@ pipeline {
gitBaseRef: gitBaseRef,
pullRequestNumber: "$env.CHANGE_ID",
suppress_suffix_of_image_tag: true,
test_client_type: '["pytest"]'
images: '["milvus","pytest","helm"]'
milvus_image_tag = tekton.query_result job_name, 'milvus-image-tag'
pytest_image = tekton.query_result job_name, 'pytest-image-fqdn'
helm_image = tekton.query_result job_name, 'helm-image-fqdn'
}
}
}
@@ -88,7 +89,9 @@ pipeline {
ciMode: 'e2e',
milvus_image_tag: milvus_image_tag,
pytest_image: pytest_image,
milvus_deployment_option: milvus_deployment_option
helm_image: helm_image,
milvus_deployment_option: milvus_deployment_option,
verbose: 'false'
} catch (Exception e) {
println e
}
@@ -98,7 +101,9 @@ pipeline {
ciMode: 'e2e',
milvus_image_tag: milvus_image_tag,
pytest_image: pytest_image,
milvus_deployment_option: milvus_deployment_option
helm_image: helm_image,
milvus_deployment_option: milvus_deployment_option,
verbose: 'false'
}
}
}

tests/_helm/Dockerfile (new file)

@@ -0,0 +1,5 @@
FROM alpine/helm:3.15.3
WORKDIR /app
COPY tests/_helm/values values
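The new image bakes the deployment-option values files into an alpine/helm:3.15.3 base (working directory /app, values under /app/values), so the CI test stage can render or install the Milvus chart without a repository checkout. A minimal usage sketch; the docker.image(...).inside wrapper, the values path, and the per-option file naming are assumptions, not part of this diff:

// Hypothetical consumer of the helm image built from tests/_helm/Dockerfile.
// helm_image and milvus_helm_chart_version come from the Jenkinsfiles above;
// the /app/values/<option>.yaml naming is an assumption.
docker.image(helm_image).inside {
    sh "helm repo add milvus https://zilliztech.github.io/milvus-helm/"
    sh "helm template milvus-ci milvus/milvus --version ${milvus_helm_chart_version} -f /app/values/${milvus_deployment_option}.yaml"
}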

@@ -0,0 +1,267 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: true
dataCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35426-20240812-46dadb120
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
indexCoord:
scheduler:
interval: 100
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
replicaCount: 2
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists

@@ -0,0 +1,269 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: true
streaming:
enabled: true
dataCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35426-20240812-46dadb120
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
indexCoord:
scheduler:
interval: 100
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
replicaCount: 2
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists

@@ -0,0 +1,267 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: false
dataCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35402-20240812-402f716b5
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
indexCoord:
scheduler:
interval: 100
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
enabled: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists

@@ -0,0 +1,275 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: false
dataCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35426-20240812-46dadb120
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
enabled: true
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
indexCoord:
scheduler:
interval: 100
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
enabled: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists

@@ -0,0 +1,65 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: false
etcd:
enabled: false
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
extraConfigFiles:
user.yaml: |
etcd:
use:
embed: true
data:
dir: /var/lib/milvus/etcd
common:
storageType: local
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35432-20240812-71a1562ea
indexCoordinator:
gc:
interval: 1
indexCoord:
scheduler:
interval: 100
indexNode:
disk:
enabled: true
metrics:
serviceMonitor:
enabled: true
minio:
enabled: false
mode: standalone
tls:
enabled: false
pulsar:
enabled: false
queryNode:
disk:
enabled: true
service:
type: ClusterIP
standalone:
disk:
enabled: true
extraEnv:
- name: ETCD_CONFIG_PATH
value: /milvus/configs/advanced/etcd.yaml
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists

@@ -0,0 +1,75 @@
cluster:
enabled: true
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
defaultReplicationFactor: 2
enabled: true
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
broker:
replicaCount: 2
enabled: false
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true

@@ -0,0 +1,74 @@
cluster:
enabled: true
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
broker:
replicaCount: 2
enabled: true
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true

@@ -0,0 +1,76 @@
cluster:
enabled: true
streaming:
enabled: true
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
broker:
replicaCount: 2
enabled: true
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true

@@ -0,0 +1,73 @@
cluster:
enabled: false
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
enabled: false
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true

@@ -0,0 +1,73 @@
cluster:
enabled: false
common:
security:
authorizationEnabled: true
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
enabled: false
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true

@@ -0,0 +1,94 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: false
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
enabled: false
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
extraConfigFiles:
user.yaml: |
etcd:
use:
embed: true
data:
dir: /var/lib/milvus/etcd
common:
storageType: local
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
enabled: false
mode: standalone
tls:
enabled: false
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
enabled: false
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true
extraEnv:
- name: ETCD_CONFIG_PATH
value: /milvus/configs/advanced/etcd.yaml
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists