Merge pull request #2284 from del-zhenwu/java_case

Update server-version in cases && update java cases
This commit is contained in:
groot 2020-05-11 03:57:55 -05:00 committed by GitHub
commit 0ee3c633ac
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
97 changed files with 2611 additions and 3810 deletions

View File

@ -14,6 +14,7 @@ public class TestSearchByIds {
int n_list = 1024;
int default_n_list = 16384;
int nb = 10000;
int small_nb = 10;
int n_probe = 20;
int top_k = 10;
int nq = 5;
@ -22,6 +23,7 @@ public class TestSearchByIds {
IndexType defaultIndexType = IndexType.FLAT;
List<Long> default_ids = Utils.toListIds(1111);
List<List<Float>> vectors = Utils.genVectors(nb, dimension, true);
List<List<Float>> small_vectors = Utils.genVectors(small_nb, dimension, true);
List<ByteBuffer> vectorsBinary = Utils.genBinaryVectors(nb, dimension);
String indexParam = Utils.setIndexParam(n_list);
public String searchParamStr = Utils.setSearchParam(n_probe);
@ -62,6 +64,22 @@ public class TestSearchByIds {
List<List<SearchResponse.QueryResult>> res_search = client.searchByIds(searchParam).getQueryResultsList();
assert (client.searchByIds(searchParam).getResponse().ok());
Assert.assertEquals(res_search.get(0).size(), 0);
}
// Case: collection holds fewer vectors (small_nb = 10) than the requested top_k (100)
// when searching by ids. The server is expected to reject the request.
@Test(dataProvider = "Collection", dataProviderClass = MainClass.class)
public void test_search_count_lt_top_k(MilvusClient client, String collectionName) {
int top_k = 100;
InsertParam insertParam = new InsertParam.Builder(collectionName).withFloatVectors(small_vectors).build();
InsertResponse res_insert = client.insert(insertParam);
// flush so the inserted vectors are searchable
client.flush(collectionName);
SearchByIdsParam searchParam = new SearchByIdsParam.Builder(collectionName)
.withParamsInJson(searchParamStr)
.withTopK(top_k)
.withIDs(Utils.toListIds(res_insert.getVectorIds().get(0)))
.build();
// NOTE(review): res_search is currently unused (its asserts are commented out below),
// and searchByIds is invoked a second time for the status check — consider reusing one response.
List<List<SearchResponse.QueryResult>> res_search = client.searchByIds(searchParam).getQueryResultsList();
// reason: "Failed to query by id in collection L2_FmVKbqSZaN, result doesn\'t match id count"
assert (!client.searchByIds(searchParam).getResponse().ok());
// Assert.assertEquals(res_search.size(), default_ids.size());
// Assert.assertEquals(res_search.get(0).get(0).getVectorId(), -1);
}

View File

@ -15,6 +15,7 @@ public class TestSearchVectors {
int n_list = 1024;
int default_n_list = 16384;
int nb = 10000;
int small_nb = 10;
int n_probe = 20;
int top_k = 10;
int nq = 5;
@ -22,6 +23,7 @@ public class TestSearchVectors {
IndexType indexType = IndexType.IVF_SQ8;
IndexType defaultIndexType = IndexType.FLAT;
List<List<Float>> vectors = Utils.genVectors(nb, dimension, true);
List<List<Float>> small_vectors = Utils.genVectors(small_nb, dimension, true);
List<ByteBuffer> vectorsBinary = Utils.genBinaryVectors(nb, dimension);
List<List<Float>> queryVectors = vectors.subList(0, nq);
List<ByteBuffer> queryVectorsBinary = vectorsBinary.subList(0, nq);
@ -190,6 +192,21 @@ public class TestSearchVectors {
assert (!res_search.getResponse().ok());
}
// Case: collection holds fewer vectors (small_nb = 10) than the requested top_k (100).
// Each per-query result list should be capped at the collection size rather than top_k.
@Test(dataProvider = "Collection", dataProviderClass = MainClass.class)
public void test_search_count_lt_top_k(MilvusClient client, String collectionName) {
int top_k_new = 100;
InsertParam insertParam = new InsertParam.Builder(collectionName).withFloatVectors(small_vectors).build();
client.insert(insertParam);
// flush so the inserted vectors are searchable
client.flush(collectionName);
SearchParam searchParam = new SearchParam.Builder(collectionName)
.withFloatVectors(queryVectors)
.withParamsInJson(searchParamStr)
.withTopK(top_k_new).build();
List<List<SearchResponse.QueryResult>> res_search = client.search(searchParam).getQueryResultsList();
// one result list per query vector
Assert.assertEquals(res_search.size(), nq);
// each list holds at most small_vectors.size() hits, not top_k_new
Assert.assertEquals(res_search.get(0).size(), small_vectors.size());
}
@Test(dataProvider = "Collection", dataProviderClass = MainClass.class)
public void test_search_invalid_top_k(MilvusClient client, String collectionName) {
int top_k_new = 0;

View File

@ -5,4 +5,6 @@ logs/
*idmap*.txt
__pycache__/
venv
.idea
.idea
suites/

View File

@ -1,17 +1,18 @@
timeout(time: 1440, unit: 'MINUTES') {
timeout(time: 4000, unit: 'MINUTES') {
try {
dir ("milvus-helm") {
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo update'
// sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
// sh 'helm repo update'
checkout([$class: 'GitSCM', branches: [[name: "${HELM_BRANCH}"]], userRemoteConfigs: [[url: "${HELM_URL}", name: 'origin', refspec: "+refs/heads/${HELM_BRANCH}:refs/remotes/origin/${HELM_BRANCH}"]]])
}
dir ("milvus_benchmark") {
print "Git clone url: ${TEST_URL}:${TEST_BRANCH}"
checkout([$class: 'GitSCM', branches: [[name: "${TEST_BRANCH}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "${TEST_URL}", name: 'origin', refspec: "+refs/heads/${TEST_BRANCH}:refs/remotes/origin/${TEST_BRANCH}"]]])
print "Install requirements"
sh "python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com"
// sh "python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com"
sh "python3 -m pip install -r requirements.txt"
sh "python3 -m pip install git+${TEST_LIB_URL}"
sh "python3 main.py --hostname=${params.SERVER_HOST} --image-tag=${IMAGE_TAG} --image-type=${params.IMAGE_TYPE} --suite=suites/${params.SUITE}"
sh "python3 main.py --image-version=${params.IMAGE_VERSION} --schedule-conf=scheduler/${params.CONFIG_FILE}"
}
} catch (exc) {
echo 'Deploy Test Failed !'

View File

@ -0,0 +1,46 @@
// Re-tag the locally-built Milvus image and push it (plus a "latest" alias)
// to the remote daily-build registry. Local and remote copies are cleaned up
// in "finally" regardless of the outcome.
timeout(time: 15, unit: 'MINUTES') {
def imageName = "milvus/engine:${DOCKER_VERSION}"
def remoteImageName = "milvusdb/daily-build:${REMOTE_DOCKER_VERSION}"
def localDockerRegistryImage = "${params.LOCAL_DOKCER_REGISTRY_URL}/${imageName}"
def remoteDockerRegistryImage = "${params.REMOTE_DOKCER_REGISTRY_URL}/${remoteImageName}"
try {
// drop any stale local copy before pulling a fresh one
deleteImages("${localDockerRegistryImage}", true)
def pullSourceImageStatus = sh(returnStatus: true, script: "docker pull ${localDockerRegistryImage}")
if (pullSourceImageStatus == 0) {
// NOTE(review): renameImageStatus is never checked — a failed tag/rmi only
// surfaces later when push fails; consider asserting on it.
def renameImageStatus = sh(returnStatus: true, script: "docker tag ${localDockerRegistryImage} ${remoteImageName} && docker rmi ${localDockerRegistryImage}")
def sourceImage = docker.image("${remoteImageName}")
docker.withRegistry("https://${params.REMOTE_DOKCER_REGISTRY_URL}", "${params.REMOTE_DOCKER_CREDENTIALS_ID}") {
// push the dated tag and refresh the rolling "latest" tag
sourceImage.push()
sourceImage.push("${REMOTE_DOCKER_LATEST_VERSION}")
}
} else {
echo "\"${localDockerRegistryImage}\" image does not exist !"
}
} catch (exc) {
throw exc
} finally {
// always clean both images off the build agent
deleteImages("${localDockerRegistryImage}", true)
deleteImages("${remoteDockerRegistryImage}", true)
}
}
// Delete a local Docker image if it exists.
// imageName: image reference (name:tag); force: remove by immutable image ID
// with "rmi -f", which also drops every other tag on the same image.
// Returns false only when the image existed and removal failed.
boolean deleteImages(String imageName, boolean force) {
    def imageNameStr = imageName.trim()
    // Existence probe: discard all output. The original used "2>&1 > /dev/null",
    // which redirects in the wrong order and leaks stderr into the build log.
    def isExistImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageNameStr} > /dev/null 2>&1")
    if (isExistImage == 0) {
        def deleteImageStatus = 0
        if (force) {
            // trim() removes the trailing newline that returnStdout keeps.
            def imageID = sh(returnStdout: true, script: "docker inspect --type=image --format \"{{.ID}}\" ${imageNameStr}").trim()
            deleteImageStatus = sh(returnStatus: true, script: "docker rmi -f ${imageID}")
        } else {
            deleteImageStatus = sh(returnStatus: true, script: "docker rmi ${imageNameStr}")
        }
        if (deleteImageStatus != 0) {
            return false
        }
    }
    return true
}

View File

@ -6,17 +6,14 @@ pipeline {
}
parameters{
choice choices: ['gpu', 'cpu'], description: 'cpu or gpu version', name: 'IMAGE_TYPE'
string defaultValue: 'master', description: 'server image version', name: 'IMAGE_VERSION', trim: true
choice choices: ['poseidon', 'eros', 'apollo', 'athena'], description: 'server host', name: 'SERVER_HOST'
string defaultValue: 'gpu_search_performance_sift1m.yaml', description: 'test suite config yaml', name: 'SUITE', trim: true
string defaultValue: '080_data.json', description: 'test suite config yaml', name: 'CONFIG_FILE', trim: true
string defaultValue: '09509e53-9125-4f5d-9ce8-42855987ad67', description: 'git credentials', name: 'GIT_USER', trim: true
}
environment {
IMAGE_TAG = "${params.IMAGE_VERSION}-${params.IMAGE_TYPE}-ubuntu18.04-release"
HELM_URL = "https://github.com/milvus-io/milvus-helm.git"
HELM_BRANCH = "0.6.0"
HELM_BRANCH = "master"
TEST_URL = "git@192.168.1.105:Test/milvus_benchmark.git"
TEST_BRANCH = "master"
TEST_LIB_URL = "http://192.168.1.105:6060/Test/milvus_metrics.git"
@ -37,8 +34,8 @@ pipeline {
componet: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
- name: milvus-test-env
image: registry.zilliz.com/milvus/milvus-test-env:v0.2
command:
- cat
tty: true
@ -79,10 +76,25 @@ pipeline {
}
stages {
stage("Publish Daily Docker images") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
build job: 'milvus-publish-daily-docker', wait: false
} else {
echo "Skip publish daily docker images ..."
}
}
}
}
}
stage("Deploy Test") {
steps {
gitlabCommitStatus(name: 'Deploy Test') {
container('milvus-testframework') {
container('milvus-test-env') {
script {
print "In Deploy Test Stage"
load "${env.WORKSPACE}/ci/jenkinsfile/deploy_test.groovy"
@ -95,7 +107,7 @@ pipeline {
stage ("Cleanup Env") {
steps {
gitlabCommitStatus(name: 'Cleanup Env') {
container('milvus-testframework') {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup.groovy"
}
@ -106,7 +118,7 @@ pipeline {
}
post {
always {
container('milvus-testframework') {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup.groovy"
}
@ -131,3 +143,10 @@ pipeline {
}
}
}
// True when this build was started by the cron timer trigger
// (as opposed to a user, SCM hook, or upstream job).
boolean isTimeTriggeredBuild() {
    return currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() > 0
}

View File

@ -0,0 +1,103 @@
pipeline {
agent none
options {
timestamps()
}
parameters{
string defaultValue: 'registry.zilliz.com', description: 'Local Docker registry URL', name: 'LOCAL_DOKCER_REGISTRY_URL', trim: true
string defaultValue: 'registry-1.docker.io', description: 'Remote Docker registry URL', name: 'REMOTE_DOKCER_REGISTRY_URL', trim: true
string defaultValue: 'milvus-docker-access-token', description: 'Remote Docker credentials id', name: 'REMOTE_DOCKER_CREDENTIALS_ID', trim: true
}
environment {
DAILY_BUILD_VERSION = VersionNumber([
versionNumberString : '${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
]);
}
stages {
stage('Push Daily Docker Images') {
matrix {
agent none
axes {
axis {
name 'OS_NAME'
values 'ubuntu18.04', 'centos7'
}
axis {
name 'CPU_ARCH'
values 'amd64'
}
axis {
name 'BINARY_VERSION'
values 'gpu', 'cpu'
}
}
stages {
stage("Publish Docker Images") {
environment {
DOCKER_VERSION = "master-${BINARY_VERSION}-${OS_NAME}-release"
REMOTE_DOCKER_VERSION = "${OS_NAME}-${BINARY_VERSION}-${DAILY_BUILD_VERSION}"
REMOTE_DOCKER_LATEST_VERSION = "${OS_NAME}-${BINARY_VERSION}-latest"
}
agent {
kubernetes {
label "${OS_NAME}-${BINARY_VERSION}-publish-${env.BUILD_NUMBER}"
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
componet: docker
spec:
containers:
- name: publish-images
image: registry.zilliz.com/library/docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
resources:
limits:
memory: "4Gi"
cpu: "1.0"
requests:
memory: "2Gi"
cpu: "0.5"
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
"""
}
}
stages {
stage('Publish') {
steps {
container('publish-images') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/publishDailyImages.groovy"
}
}
}
}
}
}
}
}
}
}
}

View File

@ -0,0 +1,536 @@
#!/usr/bin/env python3
import sys
import argparse
from argparse import Namespace
import os, shutil
import getopt
from ruamel.yaml import YAML, yaml_object
from ruamel.yaml.comments import CommentedSeq, CommentedMap
from ruamel.yaml.tokens import CommentToken
##
yaml = YAML(typ="rt")
## format yaml file
yaml.indent(mapping=2, sequence=4, offset=2)
############################################
# Comment operation
#
############################################
def _extract_comment(_comment):
"""
remove '#' at start of comment
"""
# if _comment is empty, do nothing
if not _comment:
return _comment
# str_ = _comment.lstrip(" ")
str_ = _comment.strip()
str_ = str_.lstrip("#")
return str_
def _add_eol_comment(element, *args, **kwargs):
    """Attach an end-of-line comment to a commented map/seq node.

    args -> (comment, key).  No-op when the element is not a
    CommentedMap/CommentedSeq or the comment is missing/empty.
    """
    if element is None or not isinstance(element, (CommentedMap, CommentedSeq)):
        return
    if not args[0]:
        # covers both None and empty comment
        return
    key = args[1]
    try:
        element.yaml_add_eol_comment(*args, **kwargs)
    except Exception:
        # a stale comment token on this key can make ruamel raise;
        # drop it and retry once
        element.ca.items.pop(key, None)
        element.yaml_add_eol_comment(*args, **kwargs)
def _map_comment(_element, _key):
    """Return the raw comment text attached to map key ``_key``.

    ruamel keeps comment tokens in ``_element.ca.items[_key]``; slot 2 is
    read first, and slot 3 is used as the fallback for comment lines placed
    below the key (per ruamel's CommentToken layout — verify against the
    ruamel.yaml version in use).  Returns "" when no comment exists.
    """
    origin_comment = ""
    token = _element.ca.items.get(_key, None)
    if token is not None:
        try:
            origin_comment = token[2].value
        except Exception:
            try:
                # comment sits below the element: add prefix "\n" and indent
                # each rebuilt line to the element's column + 2
                col = _element.lc.col + 2
                space_list = [" " for i in range(col)]
                space_str = "".join(space_list)
                origin_comment = "\n" + "".join([space_str + t.value for t in token[3]])
            except Exception:
                # no usable comment token — fall through with ""
                pass
    return origin_comment
def _seq_comment(_element, _index):
# get target comment
_comment = ""
token = _element.ca.items.get(_index, None)
if token is not None:
_comment = token[0].value
return _comment
def _start_comment(_element):
_comment = ""
cmt = _element.ca.comment
try:
_comment = cmt[1][0].value
except Exception:
pass
return _comment
def _comment_counter(_comment):
"""
counter comment tips and split into list
"""
x = lambda l: l.strip().strip("#").strip()
_counter = []
if _comment.startswith("\n"):
_counter.append("")
_counter.append(x(_comment[1:]))
return _counter
elif _comment.startswith("#\n"):
_counter.append("")
_counter.append(x(_comment[2:]))
else:
index = _comment.find("\n")
_counter.append(x(_comment[:index]))
_counter.append(x(_comment[index + 1:]))
return _counter
def _obtain_comment(_m_comment, _t_comment):
    """Merge a master comment with a target comment.

    Each comment is split into (eol, below) parts via ``_comment_counter``;
    target parts override the matching master parts.  Runs of three or more
    newlines in the merged result are collapsed to two.
    """
    # trivial cases: one side empty — return the other
    if not _m_comment or not _t_comment:
        return _m_comment or _t_comment
    _m_counter = _comment_counter(_m_comment)
    _t_counter = _comment_counter(_t_comment)
    if not _m_counter[0] and not _t_counter[1]:
        comment = _t_comment + _m_comment
    elif not _m_counter[1] and not _t_counter[0]:
        comment = _m_comment + _t_comment
    elif _t_counter[0] and _t_counter[1]:
        # target fully specified — it wins outright
        comment = _t_comment
    elif not _t_counter[0] and not _t_counter[1]:
        comment = _m_comment
    elif not _m_counter[0] and not _m_counter[1]:
        comment = _t_comment
    else:
        # partial overlap: substitute the one part the target provides
        if _t_counter[0]:
            comment = _m_comment.replace(_m_counter[0], _t_counter[0], 1)
        else:
            comment = _m_comment.replace(_m_counter[1], _t_counter[1], 1)
    # Collapse "\n\n\n" runs.  Bug fix: the loop was seeded with
    # find("\n\n"), which triggered a useless first iteration whenever the
    # comment contained a double (but no triple) newline.
    i = comment.find("\n\n\n")
    while i >= 0:
        comment = comment.replace("\n\n\n", "\n\n", 1)
        i = comment.find("\n\n\n")
    return comment
############################################
# Utils
#
############################################
def _get_update_par(_args):
_dict = _args.__dict__
# file path
_in_file = _dict.get("f", None) or _dict.get("file", None)
# tips
_tips = _dict.get('tips', None) or "Input \"-h\" for more information"
# update
_u = _dict.get("u", None) or _dict.get("update", None)
# apppend
_a = _dict.get('a', None) or _dict.get('append', None)
# out stream group
_i = _dict.get("i", None) or _dict.get("inplace", None)
_o = _dict.get("o", None) or _dict.get("out_file", None)
return _in_file, _u, _a, _i, _o, _tips
############################################
# Element operation
#
############################################
def update_map_element(element, key, value, comment, _type):
    """Set ``element[key] = value`` on a CommentedMap, merging eol comments.

    element: target CommentedMap (anything else aborts the program)
    key / value: entry to write
    comment: new comment text, merged with any existing comment on the key
    _type: declared value type (currently unused)
    """
    if not isinstance(element, CommentedMap):
        # also covers element is None
        print("Only key-value update support")
        sys.exit(1)
    origin_comment = _map_comment(element, key)
    if isinstance(element.get(key, None), (CommentedMap, CommentedSeq)):
        # warn but still overwrite, matching the established behavior
        print("Only support update a single value")
    element.update({key: value})
    merged = _obtain_comment(origin_comment, comment)
    _add_eol_comment(element, _extract_comment(merged), key)
def update_seq_element(element, value, comment, _type):
    """Append ``str(value)`` to a CommentedSeq and attach an eol comment.

    Exits with status 1 when ``element`` is not a CommentedSeq.
    """
    if not isinstance(element, CommentedSeq):
        # also covers element is None
        print("Param `-a` only use to append yaml list")
        sys.exit(1)
    element.append(str(value))
    merged = _obtain_comment("", comment)
    _add_eol_comment(element, _extract_comment(merged), len(element) - 1)
def run_update(code, keys, value, comment, _app):
    """Apply one dotted-path assignment (e.g. "a.b.c=value") to ``code``.

    Builds a small YAML snippet whose nesting mirrors the dotted key path,
    parses it with the module-level ``yaml`` loader, then merges the result
    into ``code`` via ``_merge``.  When ``_app`` is truthy the value is
    emitted as a list item ("- value") so it appends to a sequence.
    """
    key_list = keys.split(".")
    # each nesting level contributes ":\n" plus growing indentation
    space_str = ":\n "
    key_str = "{}".format(key_list[0])
    for key in key_list[1:]:
        key_str = key_str + space_str + key
        space_str = space_str + " "
    if not _app:
        # plain scalar assignment at the innermost key
        yaml_str = """{}: {}""".format(key_str, value)
    else:
        # sequence-append form: value becomes a "- item" under the key
        yaml_str = "{}{}- {}".format(key_str, space_str, value)
    if comment:
        yaml_str = "{} # {}".format(yaml_str, comment)
    mcode = yaml.load(yaml_str)
    _merge(code, mcode)
def _update(code, _update, _app, _tips):
    """Apply a comma-separated list of "key.path=value[#comment]" updates.

    Returns ``code`` (possibly modified in place).  A malformed expression
    prints a hint and exits with status 1.
    """
    if not _update:
        return code
    for entry in (part.strip() for part in _update.split(",")):
        try:
            variant, comment = entry.split("#")
        except ValueError:
            # no single '#' separator: whole entry is the expression
            variant = entry
            comment = None
        try:
            keys, value = variant.split("=")
            run_update(code, keys, value, comment, _app)
        except ValueError:
            print("Invalid format. print command \"--help\" get more info.")
            sys.exit(1)
    return code
def _backup(in_file_p):
backup_p = in_file_p + ".bak"
if os.path.exists(backup_p):
os.remove(backup_p)
if not os.path.exists(in_file_p):
print("File {} not exists.".format(in_file_p))
sys.exit(1)
shutil.copyfile(in_file_p, backup_p) # 复制文件
def _recovery(in_file_p):
backup_p = in_file_p + ".bak"
if not os.path.exists(in_file_p):
print("File {} not exists.".format(in_file_p))
sys.exit(1)
elif not os.path.exists(backup_p):
print("Backup file not exists")
sys.exit(0)
os.remove(in_file_p)
os.rename(backup_p, in_file_p)
# master merge target
def _merge(master, target):
    """Recursively merge yaml node ``target`` into ``master``, keeping comments.

    Sequences: a target map item merges into the first master map with the
    same key set, otherwise it is appended; scalars are appended if absent.
    Maps: scalar values are overwritten, nested containers recurse.
    Mismatched top-level types dump both documents and exit 1.

    NOTE(review): the original indentation was lost in this view; the block
    nesting below (in particular whether the final start-comment handling
    sits inside or after the map loop) was reconstructed — verify against
    the original file.
    """
    if type(master) != type(target):
        print("yaml format not match:\n")
        yaml.dump(master, sys.stdout)
        print("\n&&\n")
        yaml.dump(target, sys.stdout)
        sys.exit(1)
    ## item is a sequence
    if isinstance(target, CommentedSeq):
        for index in range(len(target)):
            # get target comment
            target_comment = _seq_comment(target, index)
            master_index = len(master)
            target_item = target[index]
            if isinstance(target_item, CommentedMap):
                merge_flag = False
                for idx in range(len(master)):
                    if isinstance(master[idx], CommentedMap):
                        # merge into the master map with an identical key set
                        if master[idx].keys() == target_item.keys():
                            _merge(master[idx], target_item)
                            # nonlocal merge_flag
                            master_index = idx
                            merge_flag = True
                            break
                if merge_flag is False:
                    master.append(target_item)
            elif target_item not in master:
                master.append(target[index])
            else:
                # merge(master[index], target[index])
                pass
            # # remove enter signal in previous item
            previous_comment = _seq_comment(master, master_index - 1)
            _add_eol_comment(master, _extract_comment(previous_comment), master_index - 1)
            origin_comment = _seq_comment(master, master_index)
            comment = _obtain_comment(origin_comment, target_comment)
            if len(comment) > 0:
                # trailing blank line separates sequence entries
                _add_eol_comment(master, _extract_comment(comment) + "\n\n", len(master) - 1)
    ## item is a map
    elif isinstance(target, CommentedMap):
        for item in target:
            if item == "flag":
                # presumably a debug hook — TODO confirm intent
                print("")
            origin_comment = _map_comment(master, item)
            target_comment = _map_comment(target, item)
            # get origin start comment
            origin_start_comment = _start_comment(master)
            # get target start comment
            target_start_comment = _start_comment(target)
            m = master.get(item, default=None)
            if m is None or \
                    (not (isinstance(m, CommentedMap) or
                          isinstance(m, CommentedSeq))):
                # scalar or missing: target value wins
                master.update({item: target[item]})
            else:
                _merge(master[item], target[item])
            comment = _obtain_comment(origin_comment, target_comment)
            if len(comment) > 0:
                _add_eol_comment(master, _extract_comment(comment), item)
        start_comment = _obtain_comment(origin_start_comment, target_start_comment)
        if len(start_comment) > 0:
            master.yaml_set_start_comment(_extract_comment(start_comment))
def _save(_code, _file):
    """Dump yaml document ``_code`` into ``_file`` (overwrites)."""
    with open(_file, "w") as handle:
        yaml.dump(_code, handle)
def _load(_file):
    """Parse ``_file`` and return the round-trippable yaml document."""
    with open(_file, "r") as handle:
        return yaml.load(handle)
############################################
# sub parser process operation
#
############################################
def merge_yaml(_args):
    """`merge` sub-command: merge another yaml file into the input file.

    Requires both an input file and a merge file; the result goes in place
    (-i, with a .bak backup) or to an output file (-o), otherwise the help
    hint is printed and the program exits 1.
    """
    _m_file = _args.__dict__.get("merge_file", None)
    _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args)
    if not (_in_file and _m_file):
        print(_tips)
        sys.exit(1)
    code = _load(_in_file)
    _merge(code, _load(_m_file))
    # apply any additional "-u" expressions on top of the merge
    _update(code, _u, _a, _tips)
    if _i:
        _backup(_in_file)
        _save(code, _in_file)
    elif _o:
        _save(code, _o)
    else:
        # no output destination chosen
        print(_tips)
        sys.exit(1)
def update_yaml(_args):
    """`update` sub-command: apply "-u key.path=value" edits to a yaml file.

    Requires an input file and an update expression; writes in place (-i,
    with a .bak backup) or to an output file (-o).
    """
    _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args)
    if not _in_file or not _u:
        print(_tips)
        sys.exit(1)
    code = _load(_in_file)
    if _i and _o:
        # -i and -o are mutually exclusive (also enforced by argparse)
        print(_tips)
        sys.exit(1)
    _update(code, _u, _a, _tips)
    if _i:
        _backup(_in_file)
        _save(code, _in_file)
    elif _o:
        _save(code, _o)
    else:
        # Bug fix: previously the updated document was silently discarded
        # when neither -i nor -o was given; report it like merge_yaml does.
        print(_tips)
        sys.exit(1)
def reset(_args):
    """`reset` sub-command: restore a yaml file from its .bak backup."""
    opts = _args.__dict__
    target = opts.get("f", None) or opts.get("file", None)
    if target:
        _recovery(target)
    else:
        # no file given — show the help hint
        print(opts.get("tips", None) or "Input \"-h\" for more information")
############################################
# Cli operation
#
############################################
def _set_merge_parser(_parsers):
    """
    config merge parser
    """
    merge_parser = _parsers.add_parser("merge", help="merge with another yaml file")
    # merge shares the update/output options in addition to -m
    _set_merge_parser_arg(merge_parser)
    _set_update_parser_arg(merge_parser)
    merge_parser.set_defaults(
        function=merge_yaml,
        tips=merge_parser.format_help()
    )
def _set_merge_parser_arg(_parser):
    """
    config parser argument for merging
    """
    _parser.add_argument("-m", "--merge-file", help="indicate merge yaml file")
def _set_update_parser(_parsers):
    """
    config update parser
    """
    update_parser = _parsers.add_parser("update", help="update with another yaml file")
    _set_update_parser_arg(update_parser)
    update_parser.set_defaults(
        function=update_yaml,
        tips=update_parser.format_help()
    )
def _set_update_parser_arg(_parser):
    """
    config parser argument for updating
    """
    _parser.add_argument("-f", "--file", help="source yaml file")
    _parser.add_argument('-u', '--update', help="update with args, instance as \"a.b.c=d# d comment\"")
    _parser.add_argument('-a', '--append', action="store_true", help="append to a seq")
    # output destination: write a new file (-o) OR edit in place (-i), never both
    group = _parser.add_mutually_exclusive_group()
    group.add_argument("-o", "--out-file", help="indicate output yaml file")
    group.add_argument("-i", "--inplace", action="store_true", help="indicate whether result store in origin file")
def _set_reset_parser(_parsers):
    """
    config reset parser
    """
    reset_parser = _parsers.add_parser("reset", help="reset yaml file")
    # indicate yaml file
    reset_parser.add_argument('-f', '--file', help="indicate input yaml file")
    reset_parser.set_defaults(
        function=reset,
        tips=reset_parser.format_help()
    )
def main():
    """Cli entry point: dispatch to the merge/update/reset sub-command."""
    parser = argparse.ArgumentParser()
    sub_parsers = parser.add_subparsers()
    # register the three sub-commands
    for configure in (_set_merge_parser, _set_update_parser, _set_reset_parser):
        configure(sub_parsers)
    # each sub-parser injects its handler via set_defaults(function=...)
    args = parser.parse_args()
    args.function(args)
# run the cli only when executed as a script, not when imported
if __name__ == '__main__':
    main()

View File

@ -18,8 +18,11 @@ INDEX_MAP = {
"ivf_sq8": IndexType.IVF_SQ8,
"nsg": IndexType.RNSG,
"ivf_sq8h": IndexType.IVF_SQ8H,
"ivf_pq": IndexType.IVF_PQ
"ivf_pq": IndexType.IVF_PQ,
"hnsw": IndexType.HNSW,
"annoy": IndexType.ANNOY
}
epsilon = 0.1
def time_wrapper(func):
"""
@ -35,24 +38,23 @@ def time_wrapper(func):
class MilvusClient(object):
def __init__(self, table_name=None, ip=None, port=None, timeout=60):
self._milvus = Milvus()
self._table_name = table_name
def __init__(self, collection_name=None, ip=None, port=None, timeout=60):
self._collection_name = collection_name
try:
i = 1
start_time = time.time()
if not ip:
self._milvus.connect(
self._milvus = Milvus(
host = SERVER_HOST_DEFAULT,
port = SERVER_PORT_DEFAULT)
else:
# retry connect for remote server
while time.time() < start_time + timeout:
try:
self._milvus.connect(
self._milvus = Milvus(
host = ip,
port = port)
if self._milvus.connected() is True:
if self._milvus.server_status():
logger.debug("Try connect times: %d, %s" % (i, round(time.time() - start_time, 2)))
break
except Exception as e:
@ -63,16 +65,23 @@ class MilvusClient(object):
raise e
def __str__(self):
return 'Milvus table %s' % self._table_name
return 'Milvus collection %s' % self._collection_name
def check_status(self, status):
if not status.OK():
logger.error(status.message)
# raise Exception("Status not ok")
raise Exception("Status not ok")
def create_table(self, table_name, dimension, index_file_size, metric_type):
if not self._table_name:
self._table_name = table_name
def check_result_ids(self, result):
for index, item in enumerate(result):
if item[0].distance >= epsilon:
logger.error(index)
logger.error(item[0].distance)
raise Exception("Distance wrong")
def create_collection(self, collection_name, dimension, index_file_size, metric_type):
if not self._collection_name:
self._collection_name = collection_name
if metric_type == "l2":
metric_type = MetricType.L2
elif metric_type == "ip":
@ -81,61 +90,82 @@ class MilvusClient(object):
metric_type = MetricType.JACCARD
elif metric_type == "hamming":
metric_type = MetricType.HAMMING
elif metric_type == "sub":
metric_type = MetricType.SUBSTRUCTURE
elif metric_type == "super":
metric_type = MetricType.SUPERSTRUCTURE
else:
logger.error("Not supported metric_type: %s" % metric_type)
create_param = {'table_name': table_name,
create_param = {'collection_name': collection_name,
'dimension': dimension,
'index_file_size': index_file_size,
"metric_type": metric_type}
status = self._milvus.create_table(create_param)
status = self._milvus.create_collection(create_param)
self.check_status(status)
@time_wrapper
def insert(self, X, ids=None):
status, result = self._milvus.add_vectors(self._table_name, X, ids)
status, result = self._milvus.add_vectors(self._collection_name, X, ids)
self.check_status(status)
return status, result
@time_wrapper
def create_index(self, index_type, nlist):
index_params = {
"index_type": INDEX_MAP[index_type],
"nlist": nlist,
}
logger.info("Building index start, table_name: %s, index_params: %s" % (self._table_name, json.dumps(index_params)))
status = self._milvus.create_index(self._table_name, index=index_params)
def delete_vectors(self, ids):
status = self._milvus.delete_by_id(self._collection_name, ids)
self.check_status(status)
@time_wrapper
def flush(self):
status = self._milvus.flush([self._collection_name])
self.check_status(status)
@time_wrapper
def compact(self):
status = self._milvus.compact(self._collection_name)
self.check_status(status)
@time_wrapper
def create_index(self, index_type, index_param=None):
index_type = INDEX_MAP[index_type]
logger.info("Building index start, collection_name: %s, index_type: %s" % (self._collection_name, index_type))
if index_param:
logger.info(index_param)
status = self._milvus.create_index(self._collection_name, index_type, index_param)
self.check_status(status)
def describe_index(self):
status, result = self._milvus.describe_index(self._table_name)
status, result = self._milvus.describe_index(self._collection_name)
self.check_status(status)
index_type = None
for k, v in INDEX_MAP.items():
if result._index_type == v:
index_type = k
break
nlist = result._nlist
res = {
"index_type": index_type,
"nlist": nlist
}
return res
return {"index_type": index_type, "index_param": result._params}
def drop_index(self):
logger.info("Drop index: %s" % self._table_name)
return self._milvus.drop_index(self._table_name)
logger.info("Drop index: %s" % self._collection_name)
return self._milvus.drop_index(self._collection_name)
@time_wrapper
def query(self, X, top_k, nprobe):
status, result = self._milvus.search_vectors(self._table_name, top_k, nprobe, X)
def query(self, X, top_k, search_param=None):
status, result = self._milvus.search_vectors(self._collection_name, top_k, query_records=X, params=search_param)
self.check_status(status)
return result
def count(self):
return self._milvus.get_table_row_count(self._table_name)[1]
@time_wrapper
def query_ids(self, top_k, ids, search_param=None):
status, result = self._milvus.search_by_ids(self._collection_name, ids, top_k, params=search_param)
self.check_result_ids(result)
return result
def delete(self, timeout=60):
logger.info("Start delete table: %s" % self._table_name)
self._milvus.delete_table(self._table_name)
def count(self):
return self._milvus.count_collection(self._collection_name)[1]
def delete(self, timeout=120):
timeout = int(timeout)
logger.info("Start delete collection: %s" % self._collection_name)
self._milvus.drop_collection(self._collection_name)
i = 0
while i < timeout:
if self.count():
@ -145,24 +175,26 @@ class MilvusClient(object):
else:
break
if i >= timeout:
logger.error("Delete table timeout")
logger.error("Delete collection timeout")
def describe(self):
return self._milvus.describe_table(self._table_name)
return self._milvus.describe_collection(self._collection_name)
def show_tables(self):
return self._milvus.show_tables()
def show_collections(self):
return self._milvus.show_collections()
def exists_table(self, table_name=None):
if table_name is None:
table_name = self._table_name
status, res = self._milvus.has_table(table_name)
self.check_status(status)
def exists_collection(self, collection_name=None):
if collection_name is None:
collection_name = self._collection_name
status, res = self._milvus.has_collection(collection_name)
# self.check_status(status)
return res
@time_wrapper
def preload_table(self):
return self._milvus.preload_table(self._table_name, timeout=3000)
def preload_collection(self):
status = self._milvus.preload_collection(self._collection_name, timeout=3000)
self.check_status(status)
return status
def get_server_version(self):
status, res = self._milvus.server_version()
@ -192,20 +224,20 @@ class MilvusClient(object):
return res
def fit(table_name, X):
def fit(collection_name, X):
milvus = Milvus()
milvus.connect(host = SERVER_HOST_DEFAULT, port = SERVER_PORT_DEFAULT)
start = time.time()
status, ids = milvus.add_vectors(table_name, X)
status, ids = milvus.add_vectors(collection_name, X)
end = time.time()
logger(status, round(end - start, 2))
def fit_concurrent(table_name, process_num, vectors):
def fit_concurrent(collection_name, process_num, vectors):
processes = []
for i in range(process_num):
p = Process(target=fit, args=(table_name, vectors, ))
p = Process(target=fit, args=(collection_name, vectors, ))
processes.append(p)
p.start()
for p in processes:
@ -216,12 +248,12 @@ if __name__ == "__main__":
import numpy
import sklearn.preprocessing
# table_name = "tset_test"
# # table_name = "test_tset1"
# m = MilvusClient(table_name)
# collection_name = "tset_test"
# # collection_name = "test_tset1"
# m = MilvusClient(collection_name)
# m.delete()
# time.sleep(2)
# m.create_table(table_name, 128, 20, "ip")
# m.create_collection(collection_name, 128, 20, "ip")
# print(m.describe())
# print(m.count())

View File

@ -33,47 +33,47 @@ class DockerRunner(Runner):
continue
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["table_name"]
collection_name = param["collection_name"]
volume_name = param["db_path_prefix"]
print(table_name)
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
print(collection_name)
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
for k, v in param.items():
if k.startswith("server."):
# Update server config
utils.modify_config(k, v, type="server", db_slave=None)
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
# Check has table or not
if milvus.exists_table():
milvus = MilvusClient(collection_name)
# Check has collection or not
if milvus.exists_collection():
milvus.delete()
time.sleep(10)
milvus.create_table(table_name, dimension, index_file_size, metric_type)
milvus.create_collection(collection_name, dimension, index_file_size, metric_type)
# debug
# milvus.create_index("ivf_sq8", 16384)
res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
res = self.do_insert(milvus, collection_name, data_type, dimension, collection_size, param["ni_per"])
logger.info(res)
# wait for file merge
time.sleep(table_size * dimension / 5000000)
time.sleep(collection_size * dimension / 5000000)
# Clear up
utils.remove_container(container)
elif op_type == "query":
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
collection_name = param["dataset"]
volume_name = param["db_path_prefix"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
for k, v in param.items():
if k.startswith("server."):
utils.modify_config(k, v, type="server")
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
logger.debug(milvus.show_tables())
# Check has table or not
if not milvus.exists_table():
logger.warning("Table %s not existed, continue exec next params ..." % table_name)
milvus = MilvusClient(collection_name)
logger.debug(milvus.show_collections())
# Check has collection or not
if not milvus.exists_collection():
logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
continue
# parse index info
index_types = param["index.index_types"]
@ -90,17 +90,17 @@ class DockerRunner(Runner):
logger.info(result)
logger.info(milvus.count())
# preload index
milvus.preload_table()
milvus.preload_collection()
logger.info("Start warm up query")
res = self.do_query(milvus, table_name, [1], [1], 1, 1)
res = self.do_query(milvus, collection_name, [1], [1], 1, 1)
logger.info("End warm up query")
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count)
headers = ["Nq/Top-k"]
headers.extend([str(top_k) for top_k in top_ks])
utils.print_table(headers, nqs, res)
utils.print_collection(headers, nqs, res)
utils.remove_container(container)
elif run_type == "insert_performance":
@ -114,28 +114,28 @@ class DockerRunner(Runner):
continue
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["table_name"]
collection_name = param["collection_name"]
volume_name = param["db_path_prefix"]
print(table_name)
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
print(collection_name)
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
for k, v in param.items():
if k.startswith("server."):
# Update server config
utils.modify_config(k, v, type="server", db_slave=None)
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
# Check has table or not
if milvus.exists_table():
milvus = MilvusClient(collection_name)
# Check has collection or not
if milvus.exists_collection():
milvus.delete()
time.sleep(10)
milvus.create_table(table_name, dimension, index_file_size, metric_type)
milvus.create_collection(collection_name, dimension, index_file_size, metric_type)
# debug
# milvus.create_index("ivf_sq8", 16384)
res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
res = self.do_insert(milvus, collection_name, data_type, dimension, collection_size, param["ni_per"])
logger.info(res)
# wait for file merge
time.sleep(table_size * dimension / 5000000)
time.sleep(collection_size * dimension / 5000000)
# Clear up
utils.remove_container(container)
@ -147,19 +147,19 @@ class DockerRunner(Runner):
container = None
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
collection_name = param["dataset"]
volume_name = param["db_path_prefix"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
for k, v in param.items():
if k.startswith("server."):
utils.modify_config(k, v, type="server")
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
logger.debug(milvus.show_tables())
# Check has table or not
if not milvus.exists_table():
logger.warning("Table %s not existed, continue exec next params ..." % table_name)
milvus = MilvusClient(collection_name)
logger.debug(milvus.show_collections())
# Check has collection or not
if not milvus.exists_collection():
logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
continue
# parse index info
index_types = param["index.index_types"]
@ -176,17 +176,17 @@ class DockerRunner(Runner):
logger.info(result)
logger.info(milvus.count())
# preload index
milvus.preload_table()
milvus.preload_collection()
logger.info("Start warm up query")
res = self.do_query(milvus, table_name, [1], [1], 1, 1)
res = self.do_query(milvus, collection_name, [1], [1], 1, 1)
logger.info("End warm up query")
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count)
headers = ["Nq/Top-k"]
headers.extend([str(top_k) for top_k in top_ks])
utils.print_table(headers, nqs, res)
utils.print_collection(headers, nqs, res)
utils.remove_container(container)
elif run_type == "accuracy":
@ -212,21 +212,21 @@ class DockerRunner(Runner):
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
collection_name = param["dataset"]
sift_acc = False
if "sift_acc" in param:
sift_acc = param["sift_acc"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
for k, v in param.items():
if k.startswith("server."):
utils.modify_config(k, v, type="server")
volume_name = param["db_path_prefix"]
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
# Check has table or not
if not milvus.exists_table():
logger.warning("Table %s not existed, continue exec next params ..." % table_name)
milvus = MilvusClient(collection_name)
# Check has collection or not
if not milvus.exists_collection():
logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
continue
# parse index info
@ -236,7 +236,7 @@ class DockerRunner(Runner):
top_ks, nqs, nprobes = parser.search_params_parser(param)
if sift_acc is True:
# preload groundtruth data
true_ids_all = self.get_groundtruth_ids(table_size)
true_ids_all = self.get_groundtruth_ids(collection_size)
acc_dict = {}
for index_type in index_types:
for nlist in nlists:
@ -244,7 +244,7 @@ class DockerRunner(Runner):
logger.info(result)
milvus.create_index(index_type, nlist)
# preload index
milvus.preload_table()
milvus.preload_collection()
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
@ -252,17 +252,17 @@ class DockerRunner(Runner):
for nq in nqs:
result_ids = []
id_prefix = "%s_index_%s_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
(table_name, index_type, nlist, metric_type, nprobe, top_k, nq)
(collection_name, index_type, nlist, metric_type, nprobe, top_k, nq)
if sift_acc is False:
self.do_query_acc(milvus, table_name, top_k, nq, nprobe, id_prefix)
self.do_query_acc(milvus, collection_name, top_k, nq, nprobe, id_prefix)
if index_type != "flat":
# Compute accuracy
base_name = "%s_index_flat_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
(table_name, nlist, metric_type, nprobe, top_k, nq)
(collection_name, nlist, metric_type, nprobe, top_k, nq)
avg_acc = self.compute_accuracy(base_name, id_prefix)
logger.info("Query: <%s> accuracy: %s" % (id_prefix, avg_acc))
else:
result_ids, result_distances = self.do_query_ids(milvus, table_name, top_k, nq, nprobe)
result_ids, result_distances = self.do_query_ids(milvus, collection_name, top_k, nq, nprobe)
debug_file_ids = "0.5.3_result_ids"
debug_file_distances = "0.5.3_result_distances"
with open(debug_file_ids, "w+") as fd:
@ -276,10 +276,10 @@ class DockerRunner(Runner):
fd.write("%s\n" % str(true_item))
acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
logger.info("Query: <%s> accuracy: %s" % (id_prefix, acc_value))
# # print accuracy table
# headers = [table_name]
# # print accuracy collection
# headers = [collection_name]
# headers.extend([str(top_k) for top_k in top_ks])
# utils.print_table(headers, nqs, res)
# utils.print_collection(headers, nqs, res)
# remove container, and run next definition
logger.info("remove container, and run next definition")
@ -295,10 +295,10 @@ class DockerRunner(Runner):
container = None
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
collection_name = param["dataset"]
index_type = param["index_type"]
volume_name = param["db_path_prefix"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
# set default test time
if "during_time" not in param:
@ -317,10 +317,10 @@ class DockerRunner(Runner):
container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None)
time.sleep(2)
milvus = MilvusClient(table_name)
# Check has table or not
if not milvus.exists_table():
logger.warning("Table %s not existed, continue exec next params ..." % table_name)
milvus = MilvusClient(collection_name)
# Check has collection or not
if not milvus.exists_collection():
logger.warning("Table %s not existed, continue exec next params ..." % collection_name)
continue
start_time = time.time()
@ -331,22 +331,22 @@ class DockerRunner(Runner):
processes = []
# do query
# for i in range(query_process_num):
# milvus_instance = MilvusClient(table_name)
# milvus_instance = MilvusClient(collection_name)
# top_k = random.choice([x for x in range(1, 100)])
# nq = random.choice([x for x in range(1, 100)])
# nprobe = random.choice([x for x in range(1, 1000)])
# # logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
# p = Process(target=self.do_query, args=(milvus_instance, table_name, [top_k], [nq], [nprobe], run_count, ))
# p = Process(target=self.do_query, args=(milvus_instance, collection_name, [top_k], [nq], [nprobe], run_count, ))
# processes.append(p)
# p.start()
# time.sleep(0.1)
# for p in processes:
# p.join()
milvus_instance = MilvusClient(table_name)
milvus_instance = MilvusClient(collection_name)
top_ks = random.sample([x for x in range(1, 100)], 3)
nqs = random.sample([x for x in range(1, 1000)], 3)
nprobe = random.choice([x for x in range(1, 500)])
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count)
if i % 10 == 0:
status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
if not status.OK():

View File

@ -5,10 +5,11 @@ import time
import re
import random
import traceback
import json
from multiprocessing import Process
import numpy as np
from yaml import full_load, dump
from client import MilvusClient
import utils
import parser
from runner import Runner
from milvus_metrics.api import report
@ -23,6 +24,7 @@ INSERT_INTERVAL = 50000
timestamp = int(time.time())
default_path = "/var/lib/milvus"
class K8sRunner(Runner):
"""run docker mode"""
def __init__(self):
@ -33,41 +35,17 @@ class K8sRunner(Runner):
self.hostname = None
self.env_value = None
def init_env(self, server_config, args):
self.hostname = args.hostname
# update server_config
helm_path = os.path.join(os.getcwd(), "../milvus-helm/milvus")
server_config_file = helm_path+"/ci/config/sqlite/%s/server_config.yaml" % (args.image_type)
if not os.path.exists(server_config_file):
raise Exception("File %s not existed" % server_config_file)
if server_config:
logger.debug("Update server config")
utils.update_server_config(server_config_file, server_config)
# update log_config
log_config_file = helm_path+"/config/log_config.conf"
if not os.path.exists(log_config_file):
raise Exception("File %s not existed" % log_config_file)
src_log_config_file = helm_path+"/config/log_config.conf.src"
if not os.path.exists(src_log_config_file):
# copy
os.system("cp %s %s" % (log_config_file, src_log_config_file))
else:
# reset
os.system("cp %s %s" % (src_log_config_file, log_config_file))
if "db_config.primary_path" in server_config:
os.system("sed -i 's#%s#%s#g' %s" % (default_path, server_config["db_config.primary_path"], log_config_file))
# with open(log_config_file, "r+") as fd:
# for line in fd.readlines():
# fd.write(re.sub(r'^%s' % default_path, server_config["db_config.primary_path"], line))
def init_env(self, server_config, server_host, image_type, image_tag):
self.hostname = server_host
# update values
helm_path = os.path.join(os.getcwd(), "../milvus-helm")
values_file_path = helm_path+"/values.yaml"
if not os.path.exists(values_file_path):
raise Exception("File %s not existed" % values_file_path)
utils.update_values(values_file_path, args.hostname)
utils.update_values(values_file_path, server_host, server_config)
try:
logger.debug("Start install server")
self.host, self.ip = utils.helm_install_server(helm_path, args.image_tag, args.image_type, self.name, namespace)
self.host, self.ip = utils.helm_install_server(helm_path, image_tag, image_type, self.name, namespace)
except Exception as e:
logger.error("Helm install server failed: %s" % str(e))
logger.error(traceback.format_exc())
@ -83,9 +61,9 @@ class K8sRunner(Runner):
def clean_up(self):
logger.debug(self.name)
utils.helm_del_server(self.name)
utils.helm_del_server(self.name, namespace)
def report_wrapper(self, milvus_instance, env_value, hostname, table_info, index_info, search_params):
def report_wrapper(self, milvus_instance, env_value, hostname, collection_info, index_info, search_params):
metric = Metric()
metric.set_run_id(timestamp)
metric.env = Env(env_value)
@ -95,45 +73,51 @@ class K8sRunner(Runner):
server_mode = milvus_instance.get_server_mode()
commit = milvus_instance.get_server_commit()
metric.server = Server(version=server_version, mode=server_mode, build_commit=commit)
metric.table = table_info
metric.collection = collection_info
metric.index = index_info
metric.search = search_params
return metric
def run(self, run_type, table):
def run(self, run_type, collection):
logger.debug(run_type)
logger.debug(table)
table_name = table["table_name"]
milvus_instance = MilvusClient(table_name=table_name, ip=self.ip)
logger.debug(collection)
collection_name = collection["collection_name"]
milvus_instance = MilvusClient(collection_name=collection_name, ip=self.ip)
self.env_value = milvus_instance.get_server_config()
# ugly implemention
self.env_value.pop("logs")
if run_type == "insert_performance":
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
ni_per = table["ni_per"]
build_index = table["build_index"]
if milvus_instance.exists_table():
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"]
if milvus_instance.exists_collection():
milvus_instance.delete()
time.sleep(10)
index_info = {}
search_params = {}
milvus_instance.create_table(table_name, dimension, index_file_size, metric_type)
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
if build_index is True:
index_type = table["index_type"]
nlist = table["nlist"]
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info = {
"index_type": index_type,
"index_nlist": nlist
"index_param": index_param
}
milvus_instance.create_index(index_type, nlist)
res = self.do_insert(milvus_instance, table_name, data_type, dimension, table_size, ni_per)
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
table_info = {
milvus_instance.flush()
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": table_name
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, table_info, index_info, search_params)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": "insert_performance",
"type": run_type,
"value": {
"total_time": res["total_time"],
"qps": res["qps"],
@ -141,24 +125,56 @@ class K8sRunner(Runner):
}
}
report(metric)
logger.debug("Wait for file merge")
time.sleep(120)
if build_index is True:
logger.debug("Start build index for last file")
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
elif run_type == "build_performance":
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
index_type = table["index_type"]
nlist = table["nlist"]
table_info = {
if run_type == "insert_flush_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
if milvus_instance.exists_collection():
milvus_instance.delete()
time.sleep(10)
index_info = {}
search_params = {}
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
logger.debug(milvus_instance.count())
start_time = time.time()
milvus_instance.flush()
end_time = time.time()
logger.debug(milvus_instance.count())
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": table_name
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": run_type,
"value": {
"flush_time": round(end_time - start_time, 1)
}
}
report(metric)
elif run_type == "build_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
index_type = collection["index_type"]
index_param = collection["index_param"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
index_info = {
"index_type": index_type,
"index_nlist": nlist
"index_param": index_param
}
if not milvus_instance.exists_table():
logger.error("Table name: %s not existed" % table_name)
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
search_params = {}
start_time = time.time()
@ -166,11 +182,12 @@ class K8sRunner(Runner):
logger.debug("Drop index")
milvus_instance.drop_index()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
milvus_instance.create_index(index_type, nlist)
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
logger.debug(milvus_instance.count())
end_time = time.time()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, table_info, index_info, search_params)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": "build_performance",
"value": {
@ -182,47 +199,90 @@ class K8sRunner(Runner):
}
report(metric)
elif run_type == "search_performance":
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
run_count = table["run_count"]
search_params = table["search_params"]
table_info = {
elif run_type == "delete_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
search_params = {}
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": table_name
"dataset_name": collection_name
}
if not milvus_instance.exists_table():
logger.error("Table name: %s not existed" % table_name)
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
result = milvus_instance.describe_index()
index_info = {
"index_type": result["index_type"],
"index_nlist": result["nlist"]
}
length = milvus_instance.count()
logger.info(length)
index_info = milvus_instance.describe_index()
logger.info(index_info)
nprobes = search_params["nprobes"]
top_ks = search_params["top_ks"]
nqs = search_params["nqs"]
milvus_instance.preload_table()
ids = [i for i in range(length)]
loops = int(length / ni_per)
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
start_time = time.time()
for i in range(loops):
delete_ids = ids[i*ni_per : i*ni_per+ni_per]
logger.debug("Delete %d - %d" % (delete_ids[0], delete_ids[-1]))
milvus_instance.delete_vectors(delete_ids)
milvus_instance.flush()
logger.debug("Table row counts: %d" % milvus_instance.count())
logger.debug("Table row counts: %d" % milvus_instance.count())
milvus_instance.flush()
end_time = time.time()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug("Table row counts: %d" % milvus_instance.count())
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params)
metric.metrics = {
"type": "delete_performance",
"value": {
"delete_time": round(end_time - start_time, 1),
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
elif run_type == "search_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
run_count = collection["run_count"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = collection["search_params"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
# fro debugging
# time.sleep(3600)
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
milvus_instance.preload_collection()
logger.info("Start warm up query")
res = self.do_query(milvus_instance, table_name, [1], [1], 1, 2)
res = self.do_query(milvus_instance, collection_name, [1], [1], 2, search_param=search_params[0])
logger.info("End warm up query")
for nprobe in nprobes:
logger.info("Search nprobe: %s" % nprobe)
res = self.do_query(milvus_instance, table_name, top_ks, nqs, nprobe, run_count)
for search_param in search_params:
logger.info("Search param: %s" % json.dumps(search_param))
res = self.do_query(milvus_instance, collection_name, top_ks, nqs, run_count, search_param)
headers = ["Nq/Top-k"]
headers.extend([str(top_k) for top_k in top_ks])
logger.info("Search param: %s" % json.dumps(search_param))
utils.print_table(headers, nqs, res)
for index_nq, nq in enumerate(nqs):
for index_top_k, top_k in enumerate(top_ks):
search_param = {
"nprobe": nprobe,
search_param_group = {
"nq": nq,
"topk": top_k
"topk": top_k,
"search_param": search_param
}
search_time = res[index_nq][index_top_k]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, table_info, index_info, search_param)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "search_performance",
"value": {
@ -231,44 +291,93 @@ class K8sRunner(Runner):
}
report(metric)
# for sift/deep datasets
elif run_type == "accuracy":
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
search_params = table["search_params"]
table_info = {
elif run_type == "search_ids_stability":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
during_time = collection["during_time"]
ids_length = collection["ids_length"]
ids = collection["ids"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": table_name
"dataset_name": collection_name
}
if not milvus_instance.exists_table():
logger.error("Table name: %s not existed" % table_name)
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
result = milvus_instance.describe_index()
index_info = {
"index_type": result["index_type"],
"index_nlist": result["nlist"]
}
index_info = milvus_instance.describe_index()
logger.info(index_info)
nprobes = search_params["nprobes"]
top_ks = search_params["top_ks"]
nqs = search_params["nqs"]
milvus_instance.preload_table()
true_ids_all = self.get_groundtruth_ids(table_size)
for nprobe in nprobes:
logger.info("Search nprobe: %s" % nprobe)
g_top_k = int(collection["top_ks"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
g_id = int(ids.split("-")[1])
l_id = int(ids.split("-")[0])
g_id_length = int(ids_length.split("-")[1])
l_id_length = int(ids_length.split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug(start_mem_usage)
start_time = time.time()
while time.time() < start_time + during_time * 60:
search_param = {}
top_k = random.randint(l_top_k, g_top_k)
ids_num = random.randint(l_id_length, g_id_length)
ids_param = [random.randint(l_id_length, g_id_length) for _ in range(ids_num)]
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
logger.debug("Query top-k: %d, ids_num: %d, param: %s" % (top_k, ids_num, json.dumps(search_param)))
result = milvus_instance.query_ids(top_k, ids_param, search_param=search_param)
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "search_ids_stability",
"value": {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
# for sift/deep datasets
# TODO: enable
elif run_type == "accuracy":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
# mapping to search param list
search_params = self.generate_combinations(search_params)
top_ks = collection["top_ks"]
nqs = collection["nqs"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
milvus_instance.preload_collection()
true_ids_all = self.get_groundtruth_ids(collection_size)
for search_param in search_params:
for top_k in top_ks:
for nq in nqs:
total = 0
search_param = {
"nprobe": nprobe,
search_param_group = {
"nq": nq,
"topk": top_k
"topk": top_k,
"search_param": search_param
}
result_ids, result_distances = self.do_query_ids(milvus_instance, table_name, top_k, nq, nprobe)
logger.info("Query params: %s" % json.dumps(search_param_group))
result_ids, result_distances = self.do_query_ids(milvus_instance, collection_name, top_k, nq, search_param=search_param)
acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
logger.info("Query accuracy: %s" % acc_value)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, table_info, index_info, search_param)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "accuracy",
"value": {
@ -278,29 +387,33 @@ class K8sRunner(Runner):
report(metric)
elif run_type == "ann_accuracy":
hdf5_source_file = table["source_file"]
table_name = table["table_name"]
index_file_sizes = table["index_file_sizes"]
index_types = table["index_types"]
nlists = table["nlists"]
search_params = table["search_params"]
nprobes = search_params["nprobes"]
top_ks = search_params["top_ks"]
nqs = search_params["nqs"]
data_type, dimension, metric_type = parser.parse_ann_table_name(table_name)
table_info = {
hdf5_source_file = collection["source_file"]
collection_name = collection["collection_name"]
index_file_sizes = collection["index_file_sizes"]
index_types = collection["index_types"]
index_params = collection["index_params"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = collection["search_params"]
# mapping to search param list
search_params = self.generate_combinations(search_params)
# mapping to index param list
index_params = self.generate_combinations(index_params)
data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": table_name
"dataset_name": collection_name
}
dataset = utils.get_dataset(hdf5_source_file)
if milvus_instance.exists_table(table_name):
logger.info("Re-create table: %s" % table_name)
milvus_instance.delete(table_name)
if milvus_instance.exists_collection(collection_name):
logger.info("Re-create collection: %s" % collection_name)
milvus_instance.delete()
time.sleep(DELETE_INTERVAL_TIME)
true_ids = np.array(dataset["neighbors"])
for index_file_size in index_file_sizes:
milvus_instance.create_table(table_name, dimension, index_file_size, metric_type)
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
logger.info(milvus_instance.describe())
insert_vectors = self.normalize(metric_type, np.array(dataset["train"]))
# Insert batch once
@ -315,38 +428,41 @@ class K8sRunner(Runner):
milvus_instance.insert(tmp_vectors.tolist(), ids=[i for i in range(start, end)])
else:
milvus_instance.insert(tmp_vectors, ids=[i for i in range(start, end)])
time.sleep(20)
logger.info("Table: %s, row count: %s" % (table_name, milvus_instance.count()))
milvus_instance.flush()
logger.info("Table: %s, row count: %s" % (collection_name, milvus_instance.count()))
if milvus_instance.count() != len(insert_vectors):
logger.error("Table row count is not equal to insert vectors")
return
for index_type in index_types:
for nlist in nlists:
milvus_instance.create_index(index_type, nlist)
# logger.info(milvus_instance.describe_index())
logger.info("Start preload table: %s, index_type: %s, nlist: %s" % (table_name, index_type, nlist))
milvus_instance.preload_table()
for index_param in index_params:
logger.debug("Building index with param: %s" % json.dumps(index_param))
milvus_instance.create_index(index_type, index_param=index_param)
logger.info(milvus_instance.describe_index())
logger.info("Start preload collection: %s" % collection_name)
milvus_instance.preload_collection()
index_info = {
"index_type": index_type,
"index_nlist": nlist
"index_param": index_param
}
for nprobe in nprobes:
logger.debug(index_info)
for search_param in search_params:
for nq in nqs:
query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq]))
for top_k in top_ks:
search_params = {
search_param_group = {
"nq": len(query_vectors),
"nprobe": nprobe,
"topk": top_k
"topk": top_k,
"search_param": search_param
}
logger.debug(search_param_group)
if not isinstance(query_vectors, list):
result = milvus_instance.query(query_vectors.tolist(), top_k, nprobe)
result = milvus_instance.query(query_vectors.tolist(), top_k, search_param=search_param)
else:
result = milvus_instance.query(query_vectors, top_k, nprobe)
result = milvus_instance.query(query_vectors, top_k, search_param=search_param)
result_ids = result.id_array
acc_value = self.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids)
logger.info("Query ann_accuracy: %s" % acc_value)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, table_info, index_info, search_params)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "ann_accuracy",
"value": {
@ -354,50 +470,44 @@ class K8sRunner(Runner):
}
}
report(metric)
milvus_instance.delete()
elif run_type == "search_stability":
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
search_params = table["search_params"]
during_time = table["during_time"]
table_info = {
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
during_time = collection["during_time"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": table_name
"dataset_name": collection_name
}
if not milvus_instance.exists_table():
logger.error("Table name: %s not existed" % table_name)
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
result = milvus_instance.describe_index()
index_info = {
"index_type": result["index_type"],
"index_nlist": result["nlist"]
}
search_param = {}
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_nprobe = int(search_params["nprobes"].split("-")[1])
g_top_k = int(search_params["top_ks"].split("-")[1])
g_nq = int(search_params["nqs"].split("-")[1])
l_nprobe = int(search_params["nprobes"].split("-")[0])
l_top_k = int(search_params["top_ks"].split("-")[0])
l_nq = int(search_params["nqs"].split("-")[0])
milvus_instance.preload_table()
g_top_k = int(collection["top_ks"].split("-")[1])
g_nq = int(collection["nqs"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
l_nq = int(collection["nqs"].split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug(start_mem_usage)
logger.info("Start warm up query")
res = self.do_query(milvus_instance, table_name, [1], [1], 1, 2)
logger.info("End warm up query")
start_row_count = milvus_instance.count()
logger.debug(milvus_instance.describe_index())
logger.info(start_row_count)
start_time = time.time()
while time.time() < start_time + during_time * 60:
search_param = {}
top_k = random.randint(l_top_k, g_top_k)
nq = random.randint(l_nq, g_nq)
nprobe = random.randint(l_nprobe, g_nprobe)
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
logger.debug("Query nprobe:%d, nq:%d, top-k:%d" % (nprobe, nq, top_k))
result = milvus_instance.query(query_vectors, top_k, nprobe)
logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
result = milvus_instance.query(query_vectors, top_k, search_param=search_param)
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, table_info, index_info, search_param)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "search_stability",
"value": {
@ -410,56 +520,58 @@ class K8sRunner(Runner):
report(metric)
elif run_type == "stability":
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
search_params = table["search_params"]
insert_xb = table["insert_xb"]
insert_interval = table["insert_interval"]
during_time = table["during_time"]
table_info = {
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
insert_xb = collection["insert_xb"]
insert_interval = collection["insert_interval"]
delete_xb = collection["delete_xb"]
during_time = collection["during_time"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": table_name
"dataset_name": collection_name
}
if not milvus_instance.exists_table():
logger.error("Table name: %s not existed" % table_name)
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
result = milvus_instance.describe_index()
index_info = {
"index_type": result["index_type"],
"index_nlist": result["nlist"]
}
search_param = {}
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_nprobe = int(search_params["nprobes"].split("-")[1])
g_top_k = int(search_params["top_ks"].split("-")[1])
g_nq = int(search_params["nqs"].split("-")[1])
l_nprobe = int(search_params["nprobes"].split("-")[0])
l_top_k = int(search_params["top_ks"].split("-")[0])
l_nq = int(search_params["nqs"].split("-")[0])
milvus_instance.preload_table()
logger.info("Start warm up query")
res = self.do_query(milvus_instance, table_name, [1], [1], 1, 2)
logger.info("End warm up query")
g_top_k = int(collection["top_ks"].split("-")[1])
g_nq = int(collection["nqs"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
l_nq = int(collection["nqs"].split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
start_row_count = milvus_instance.count()
logger.debug(milvus_instance.describe_index())
logger.info(start_row_count)
start_time = time.time()
i = 0
ids = []
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)]
while time.time() < start_time + during_time * 60:
i = i + 1
for j in range(insert_interval):
top_k = random.randint(l_top_k, g_top_k)
nq = random.randint(l_nq, g_nq)
nprobe = random.randint(l_nprobe, g_nprobe)
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
logger.debug("Query nprobe:%d, nq:%d, top-k:%d" % (nprobe, nq, top_k))
result = milvus_instance.query(query_vectors, top_k, nprobe)
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
search_param = {}
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
result = milvus_instance.query(query_vectors[0:nq], top_k, search_param=search_param)
count = milvus_instance.count()
insert_ids = [(count+x) for x in range(len(insert_vectors))]
ids.extend(insert_ids)
status, res = milvus_instance.insert(insert_vectors, ids=insert_ids)
logger.debug("%d, row_count: %d" % (i, milvus_instance.count()))
milvus_instance.delete_vectors(ids[-delete_xb:])
milvus_instance.flush()
milvus_instance.compact()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
end_row_count = milvus_instance.count()
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, table_info, index_info, search_param)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "stability",
"value": {
@ -470,4 +582,9 @@ class K8sRunner(Runner):
"row_count_increments": end_row_count - start_row_count
}
}
report(metric)
report(metric)
else:
logger.warning("Run type not defined")
return
logger.debug("Test finished")

View File

@ -3,13 +3,17 @@ import logging
import pdb
import time
import random
import json
from multiprocessing import Process
import numpy as np
import concurrent.futures
from client import MilvusClient
import utils
import parser
from runner import Runner
DELETE_INTERVAL_TIME = 5
INSERT_INTERVAL = 50000
logger = logging.getLogger("milvus_benchmark.local_runner")
@ -20,200 +24,307 @@ class LocalRunner(Runner):
self.ip = ip
self.port = port
def run(self, definition, run_type=None):
if run_type == "performance":
for op_type, op_value in definition.items():
run_count = op_value["run_count"]
run_params = op_value["params"]
def run(self, run_type, collection):
logger.debug(run_type)
logger.debug(collection)
collection_name = collection["collection_name"]
milvus_instance = MilvusClient(collection_name=collection_name, ip=self.ip, port=self.port)
logger.info(milvus_instance.show_collections())
env_value = milvus_instance.get_server_config()
logger.debug(env_value)
if op_type == "insert":
for index, param in enumerate(run_params):
table_name = param["table_name"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
# Check has table or not
if milvus.exists_table():
milvus.delete()
time.sleep(10)
milvus.create_table(table_name, dimension, index_file_size, metric_type)
res = self.do_insert(milvus, table_name, data_type, dimension, table_size, param["ni_per"])
logger.info(res)
if run_type in ["insert_performance", "insert_flush_performance"]:
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
build_index = collection["build_index"]
if milvus_instance.exists_collection():
milvus_instance.delete()
time.sleep(10)
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
res = self.do_insert(milvus_instance, collection_name, data_type, dimension, collection_size, ni_per)
milvus_instance.flush()
logger.debug("Table row counts: %d" % milvus_instance.count())
if build_index is True:
logger.debug("Start build index for last file")
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
elif op_type == "query":
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
elif run_type == "delete_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
ni_per = collection["ni_per"]
if not milvus_instance.exists_collection():
logger.error(milvus_instance.show_collections())
logger.warning("Table: %s not found" % collection_name)
return
length = milvus_instance.count()
ids = [i for i in range(length)]
loops = int(length / ni_per)
for i in range(loops):
delete_ids = ids[i*ni_per : i*ni_per+ni_per]
logger.debug("Delete %d - %d" % (delete_ids[0], delete_ids[-1]))
milvus_instance.delete_vectors(delete_ids)
milvus_instance.flush()
logger.debug("Table row counts: %d" % milvus_instance.count())
logger.debug("Table row counts: %d" % milvus_instance.count())
milvus_instance.flush()
logger.debug("Table row counts: %d" % milvus_instance.count())
milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
logger.info(milvus.describe())
logger.info(milvus.describe_index())
logger.info(milvus.count())
logger.info(milvus.show_tables())
# parse index info
index_types = param["index.index_types"]
nlists = param["index.nlists"]
# parse top-k, nq, nprobe
top_ks, nqs, nprobes = parser.search_params_parser(param)
# milvus.drop_index()
elif run_type == "build_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
index_type = collection["index_type"]
index_param = collection["index_param"]
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
search_params = {}
start_time = time.time()
# drop index
logger.debug("Drop index")
milvus_instance.drop_index()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
logger.debug("Table row counts: %d" % milvus_instance.count())
end_time = time.time()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug("Diff memory: %s, current memory usage: %s, build time: %s" % ((end_mem_usage - start_mem_usage), end_mem_usage, round(end_time - start_time, 1)))
for index_type in index_types:
for nlist in nlists:
# milvus.create_index(index_type, nlist)
# preload index
logger.info("Start preloading table")
milvus.preload_table()
logger.info("End preloading table")
# Run query test
logger.info("Start warm up query")
res = self.do_query(milvus, table_name, [1], [1], 1, 2)
logger.info("End warm up query")
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
res = self.do_query(milvus, table_name, top_ks, nqs, nprobe, run_count)
headers = ["nq/topk"]
headers.extend([str(top_k) for top_k in top_ks])
utils.print_table(headers, nqs, res)
elif run_type == "search_performance":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
run_count = collection["run_count"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = collection["search_params"]
# for debugging
# time.sleep(3600)
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
result = milvus_instance.describe_index()
logger.info(result)
milvus_instance.preload_collection()
mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.info(mem_usage)
for search_param in search_params:
logger.info("Search param: %s" % json.dumps(search_param))
res = self.do_query(milvus_instance, collection_name, top_ks, nqs, run_count, search_param)
headers = ["Nq/Top-k"]
headers.extend([str(top_k) for top_k in top_ks])
logger.info("Search param: %s" % json.dumps(search_param))
utils.print_table(headers, nqs, res)
mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.info(mem_usage)
elif run_type == "accuracy":
for op_type, op_value in definition.items():
if op_type != "query":
logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
break
run_count = op_value["run_count"]
run_params = op_value["params"]
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
sift_acc = False
if "sift_acc" in param:
sift_acc = param["sift_acc"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
elif run_type == "search_ids_stability":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
during_time = collection["during_time"]
ids_length = collection["ids_length"]
ids = collection["ids"]
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_top_k = int(collection["top_ks"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
g_id = int(ids.split("-")[1])
l_id = int(ids.split("-")[0])
g_id_length = int(ids_length.split("-")[1])
l_id_length = int(ids_length.split("-")[0])
milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
logger.debug(milvus.show_tables())
# Check has table or not
if not milvus.exists_table():
logger.warning("Table %s not existed, continue exec next params ..." % table_name)
continue
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug(start_mem_usage)
start_time = time.time()
while time.time() < start_time + during_time * 60:
search_param = {}
top_k = random.randint(l_top_k, g_top_k)
ids_num = random.randint(l_id_length, g_id_length)
l_ids = random.randint(l_id, g_id-ids_num)
# ids_param = [random.randint(l_id_length, g_id_length) for _ in range(ids_num)]
ids_param = [id for id in range(l_ids, l_ids+ids_num)]
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
logger.debug("Query top-k: %d, ids_num: %d, param: %s" % (top_k, ids_num, json.dumps(search_param)))
result = milvus_instance.query_ids(top_k, ids_param, search_param=search_param)
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metrics = {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage,
}
logger.info(metrics)
# parse index info
index_types = param["index.index_types"]
nlists = param["index.nlists"]
# parse top-k, nq, nprobe
top_ks, nqs, nprobes = parser.search_params_parser(param)
elif run_type == "search_performance_concurrents":
data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
hdf5_source_file = collection["source_file"]
use_single_connection = collection["use_single_connection"]
concurrents = collection["concurrents"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = self.generate_combinations(collection["search_params"])
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
result = milvus_instance.describe_index()
logger.info(result)
milvus_instance.preload_collection()
dataset = utils.get_dataset(hdf5_source_file)
for concurrent_num in concurrents:
top_k = top_ks[0]
for nq in nqs:
mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.info(mem_usage)
query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq]))
logger.debug(search_params)
for search_param in search_params:
logger.info("Search param: %s" % json.dumps(search_param))
total_time = 0.0
if use_single_connection is True:
connections = [MilvusClient(collection_name=collection_name, ip=self.ip, port=self.port)]
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
future_results = {executor.submit(
self.do_query_qps, connections[0], query_vectors, top_k, search_param=search_param) : index for index in range(concurrent_num)}
else:
connections = [MilvusClient(collection_name=collection_name, ip=self.ip, port=self.port) for i in range(concurrent_num)]
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
future_results = {executor.submit(
self.do_query_qps, connections[index], query_vectors, top_k, search_param=search_param) : index for index in range(concurrent_num)}
for future in concurrent.futures.as_completed(future_results):
total_time = total_time + future.result()
qps_value = total_time / concurrent_num
logger.debug("QPS value: %f, total_time: %f, request_nums: %f" % (qps_value, total_time, concurrent_num))
mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.info(mem_usage)
if sift_acc is True:
# preload groundtruth data
true_ids_all = self.get_groundtruth_ids(table_size)
elif run_type == "ann_accuracy":
hdf5_source_file = collection["source_file"]
collection_name = collection["collection_name"]
index_file_sizes = collection["index_file_sizes"]
index_types = collection["index_types"]
index_params = collection["index_params"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = collection["search_params"]
# mapping to search param list
search_params = self.generate_combinations(search_params)
# mapping to index param list
index_params = self.generate_combinations(index_params)
acc_dict = {}
for index_type in index_types:
for nlist in nlists:
result = milvus.describe_index()
logger.info(result)
# milvus.drop_index()
milvus.create_index(index_type, nlist)
# preload index
milvus.preload_table()
# Run query test
for nprobe in nprobes:
logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe))
data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
dataset = utils.get_dataset(hdf5_source_file)
if milvus_instance.exists_collection(collection_name):
logger.info("Re-create collection: %s" % collection_name)
milvus_instance.delete()
time.sleep(DELETE_INTERVAL_TIME)
true_ids = np.array(dataset["neighbors"])
for index_file_size in index_file_sizes:
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
logger.info(milvus_instance.describe())
insert_vectors = self.normalize(metric_type, np.array(dataset["train"]))
logger.debug(len(insert_vectors))
# Insert batch once
# milvus_instance.insert(insert_vectors)
loops = len(insert_vectors) // INSERT_INTERVAL + 1
for i in range(loops):
start = i*INSERT_INTERVAL
end = min((i+1)*INSERT_INTERVAL, len(insert_vectors))
tmp_vectors = insert_vectors[start:end]
if start < end:
if not isinstance(tmp_vectors, list):
milvus_instance.insert(tmp_vectors.tolist(), ids=[i for i in range(start, end)])
else:
milvus_instance.insert(tmp_vectors, ids=[i for i in range(start, end)])
milvus_instance.flush()
logger.info("Table: %s, row count: %s" % (collection_name, milvus_instance.count()))
if milvus_instance.count() != len(insert_vectors):
logger.error("Table row count is not equal to insert vectors")
return
for index_type in index_types:
for index_param in index_params:
logger.debug("Building index with param: %s" % json.dumps(index_param))
milvus_instance.create_index(index_type, index_param=index_param)
logger.info(milvus_instance.describe_index())
logger.info("Start preload collection: %s" % collection_name)
milvus_instance.preload_collection()
for search_param in search_params:
for nq in nqs:
query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq]))
for top_k in top_ks:
for nq in nqs:
result_ids = []
id_prefix = "%s_index_%s_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
(table_name, index_type, nlist, metric_type, nprobe, top_k, nq)
if sift_acc is False:
self.do_query_acc(milvus, table_name, top_k, nq, nprobe, id_prefix)
if index_type != "flat":
# Compute accuracy
base_name = "%s_index_flat_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \
(table_name, nlist, metric_type, nprobe, top_k, nq)
avg_acc = self.compute_accuracy(base_name, id_prefix)
logger.info("Query: <%s> accuracy: %s" % (id_prefix, avg_acc))
else:
result_ids, result_distances = self.do_query_ids(milvus, table_name, top_k, nq, nprobe)
debug_file_ids = "0.5.3_result_ids"
debug_file_distances = "0.5.3_result_distances"
with open(debug_file_ids, "w+") as fd:
total = 0
for index, item in enumerate(result_ids):
true_item = true_ids_all[:nq, :top_k].tolist()[index]
tmp = set(item).intersection(set(true_item))
total = total + len(tmp)
fd.write("query: N-%d, intersection: %d, total: %d\n" % (index, len(tmp), total))
fd.write("%s\n" % str(item))
fd.write("%s\n" % str(true_item))
acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
logger.info("Query: <%s> accuracy: %s" % (id_prefix, acc_value))
# # print accuracy table
# headers = [table_name]
# headers.extend([str(top_k) for top_k in top_ks])
# utils.print_table(headers, nqs, res)
logger.debug("Search nq: %d, top-k: %d, search_param: %s" % (nq, top_k, json.dumps(search_param)))
if not isinstance(query_vectors, list):
result = milvus_instance.query(query_vectors.tolist(), top_k, search_param=search_param)
else:
result = milvus_instance.query(query_vectors, top_k, search_param=search_param)
result_ids = result.id_array
acc_value = self.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids)
logger.info("Query ann_accuracy: %s" % acc_value)
elif run_type == "stability":
for op_type, op_value in definition.items():
if op_type != "query":
logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type)
break
run_count = op_value["run_count"]
run_params = op_value["params"]
nq = 100000
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
insert_xb = collection["insert_xb"]
insert_interval = collection["insert_interval"]
delete_xb = collection["delete_xb"]
# flush_interval = collection["flush_interval"]
# compact_interval = collection["compact_interval"]
during_time = collection["during_time"]
if not milvus_instance.exists_collection():
logger.error(milvus_instance.show_collections())
logger.error("Table name: %s not existed" % collection_name)
return
g_top_k = int(collection["top_ks"].split("-")[1])
g_nq = int(collection["nqs"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
l_nq = int(collection["nqs"].split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
start_row_count = milvus_instance.count()
logger.debug(milvus_instance.describe_index())
logger.info(start_row_count)
start_time = time.time()
i = 0
ids = []
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)]
while time.time() < start_time + during_time * 60:
i = i + 1
for j in range(insert_interval):
top_k = random.randint(l_top_k, g_top_k)
nq = random.randint(l_nq, g_nq)
search_param = {}
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
result = milvus_instance.query(query_vectors[0:nq], top_k, search_param=search_param)
count = milvus_instance.count()
insert_ids = [(count+x) for x in range(len(insert_vectors))]
ids.extend(insert_ids)
status, res = milvus_instance.insert(insert_vectors, ids=insert_ids)
logger.debug("%d, row_count: %d" % (i, milvus_instance.count()))
milvus_instance.delete_vectors(ids[-delete_xb:])
milvus_instance.flush()
milvus_instance.compact()
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
end_row_count = milvus_instance.count()
metrics = {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage,
"row_count_increments": end_row_count - start_row_count
}
logger.info(metrics)
for index, param in enumerate(run_params):
logger.info("Definition param: %s" % str(param))
table_name = param["dataset"]
index_type = param["index_type"]
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
# set default test time
if "during_time" not in param:
during_time = 100 # seconds
else:
during_time = int(param["during_time"]) * 60
# set default query process num
if "query_process_num" not in param:
query_process_num = 10
else:
query_process_num = int(param["query_process_num"])
milvus = MilvusClient(table_name, ip=self.ip, port=self.port)
logger.debug(milvus.show_tables())
logger.debug(milvus.describe_index())
logger.debug(milvus.count())
# Check has table or not
if not milvus.exists_table():
logger.warning("Table %s not existed, continue exec next params ..." % table_name)
continue
start_time = time.time()
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
i = 0
while time.time() < start_time + during_time:
# processes = []
# # do query
# for i in range(query_process_num):
# milvus_instance = MilvusClient(table_name)
# top_k = random.choice([x for x in range(1, 100)])
# nq = random.choice([x for x in range(1, 1000)])
# nprobe = random.choice([x for x in range(1, 500)])
# logger.info(nprobe)
# p = Process(target=self.do_query, args=(milvus_instance, table_name, [top_k], [nq], 64, run_count, ))
# processes.append(p)
# p.start()
# time.sleep(0.1)
# for p in processes:
# p.join()
i = i + 1
milvus_instance = MilvusClient(table_name, ip=self.ip, port=self.port)
top_ks = random.sample([x for x in range(1, 100)], 1)
nqs = random.sample([x for x in range(1, 200)], 2)
nprobe = random.choice([x for x in range(1, 100)])
res = self.do_query(milvus_instance, table_name, top_ks, nqs, nprobe, run_count)
# milvus_instance = MilvusClient(table_name)
status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))])
if not status.OK():
logger.error(status.message)
logger.debug(milvus.count())
res = self.do_query(milvus_instance, table_name, top_ks, nqs, nprobe, run_count)
# status = milvus_instance.create_index(index_type, 16384)
else:
logger.warning("Run type not defined")
return
logger.debug("Test finished")

View File

@ -5,26 +5,19 @@ import pdb
import argparse
import logging
import traceback
from multiprocessing import Process
from queue import Queue
from logging import handlers
from yaml import full_load, dump
from parser import operations_parser
from local_runner import LocalRunner
from docker_runner import DockerRunner
from k8s_runner import K8sRunner
import parser
DEFAULT_IMAGE = "milvusdb/milvus:latest"
LOG_FOLDER = "logs"
NAMESPACE = "milvus"
# formatter = logging.Formatter('[%(asctime)s] [%(levelname)-4s] [%(pathname)s:%(lineno)d] %(message)s')
# if not os.path.exists(LOG_FOLDER):
# os.system('mkdir -p %s' % LOG_FOLDER)
# fileTimeHandler = handlers.TimedRotatingFileHandler(os.path.join(LOG_FOLDER, 'milvus_benchmark'), "D", 1, 10)
# fileTimeHandler.suffix = "%Y%m%d.log"
# fileTimeHandler.setFormatter(formatter)
# logging.basicConfig(level=logging.DEBUG)
# fileTimeHandler.setFormatter(formatter)
# logger.addHandler(fileTimeHandler)
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.DEBUG)
@ -42,96 +35,36 @@ def positive_int(s):
return i
# # link random_data if not exists
# def init_env():
# if not os.path.islink(BINARY_DATA_FOLDER):
# try:
# os.symlink(SRC_BINARY_DATA_FOLDER, BINARY_DATA_FOLDER)
# except Exception as e:
# logger.error("Create link failed: %s" % str(e))
# sys.exit()
def get_image_tag(image_version, image_type):
    """Compose the docker image tag from a version and an image type (cpu/gpu)."""
    # Pinned alternatives kept for manual debugging:
    # return "%s-%s-centos7-release" % ("0.7.1", image_type)
    # return "%s-%s-centos7-release" % ("PR-2159", image_type)
    tag = "%s-%s-centos7-release" % (image_version, image_type)
    return tag
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--hostname",
default="eros",
help="server host name")
parser.add_argument(
"--image-tag",
default="",
help="image tag")
parser.add_argument(
"--image-type",
default="",
help="image type")
# parser.add_argument(
# "--run-count",
# default=1,
# type=positive_int,
# help="run times for each test")
# # performance / stability / accuracy test
# parser.add_argument(
# "--run-type",
# default="search_performance",
# help="run type, default performance")
parser.add_argument(
'--suite',
metavar='FILE',
help='load test suite from FILE',
default='suites/suite.yaml')
parser.add_argument(
'--local',
action='store_true',
help='use local milvus server')
parser.add_argument(
'--host',
help='server host ip param for local mode',
default='127.0.0.1')
parser.add_argument(
'--port',
help='server port param for local mode',
default='19530')
def queue_worker(queue):
while not queue.empty():
q = queue.get()
suite = q["suite"]
server_host = q["server_host"]
image_type = q["image_type"]
image_tag = q["image_tag"]
args = parser.parse_args()
# Get all benchmark test suites
if args.suite:
with open(args.suite) as f:
with open(suite) as f:
suite_dict = full_load(f)
f.close()
# With definition order
run_type, run_params = operations_parser(suite_dict)
logger.debug(suite_dict)
# init_env()
# run_params = {"run_count": args.run_count}
if args.image_tag:
namespace = "milvus"
logger.debug(args)
# for docker mode
if args.local:
logger.error("Local mode and docker mode are incompatible")
sys.exit(-1)
# Docker pull image
# if not utils.pull_image(args.image):
# raise Exception('Image %s pull failed' % image)
# TODO: Check milvus server port is available
# logger.info("Init: remove all containers created with image: %s" % args.image)
# utils.remove_all_containers(args.image)
# runner = DockerRunner(args)
tables = run_params["tables"]
for table in tables:
run_type, run_params = parser.operations_parser(suite_dict)
collections = run_params["collections"]
for collection in collections:
# run tests
server_config = table["server"]
server_config = collection["server"]
logger.debug(server_config)
runner = K8sRunner()
if runner.init_env(server_config, args):
if runner.init_env(server_config, server_host, image_type, image_tag):
logger.debug("Start run tests")
try:
runner.run(run_type, table)
runner.run(run_type, collection)
except Exception as e:
logger.error(str(e))
logger.error(traceback.format_exc())
@ -139,24 +72,101 @@ def main():
runner.clean_up()
else:
logger.error("Runner init failed")
# for operation_type in operations:
# logger.info("Start run test, test type: %s" % operation_type)
# run_params["params"] = operations[operation_type]
# runner.run({operation_type: run_params}, run_type=args.run_type)
# logger.info("Run params: %s" % str(run_params))
logger.debug("All task finished in queue: %s" % server_host)
if args.local:
def main():
arg_parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# helm mode with scheduler
arg_parser.add_argument(
"--image-version",
default="",
help="image version")
arg_parser.add_argument(
"--schedule-conf",
metavar='FILE',
default='',
help="load test schedule from FILE")
# local mode
arg_parser.add_argument(
'--local',
action='store_true',
help='use local milvus server')
arg_parser.add_argument(
'--host',
help='server host ip param for local mode',
default='127.0.0.1')
arg_parser.add_argument(
'--port',
help='server port param for local mode',
default='19530')
arg_parser.add_argument(
'--suite',
metavar='FILE',
help='load test suite from FILE',
default='')
args = arg_parser.parse_args()
if args.schedule_conf:
if args.local:
raise Exception("Helm mode with scheduler and other mode are incompatible")
if not args.image_version:
raise Exception("Image version not given")
image_version = args.image_version
with open(args.schedule_conf) as f:
schedule_config = full_load(f)
f.close()
queues = []
server_names = set()
for item in schedule_config:
server_host = item["server"]
suite_params = item["suite_params"]
server_names.add(server_host)
q = Queue()
for suite_param in suite_params:
suite = "suites/"+suite_param["suite"]
image_type = suite_param["image_type"]
image_tag = get_image_tag(image_version, image_type)
q.put({
"suite": suite,
"server_host": server_host,
"image_tag": image_tag,
"image_type": image_type
})
queues.append(q)
logger.debug(server_names)
thread_num = len(server_names)
processes = []
for i in range(thread_num):
x = Process(target=queue_worker, args=(queues[i], ))
processes.append(x)
x.start()
time.sleep(5)
for x in processes:
x.join()
elif args.local:
# for local mode
host = args.host
port = args.port
suite = args.suite
with open(suite) as f:
suite_dict = full_load(f)
f.close()
logger.debug(suite_dict)
run_type, run_params = parser.operations_parser(suite_dict)
collections = run_params["collections"]
if len(collections) > 1:
raise Exception("Multi collections not supported in Local Mode")
collection = collections[0]
runner = LocalRunner(host, port)
for operation_type in operations:
logger.info("Start run local mode test, test type: %s" % operation_type)
run_params["params"] = operations[operation_type]
runner.run({operation_type: run_params}, run_type=args.run_type)
logger.info("Run params: %s" % str(run_params))
logger.info("Start run local mode test, test type: %s" % run_type)
runner.run(run_type, collection)
if __name__ == "__main__":
main()
main()

View File

@ -12,29 +12,29 @@ def operations_parser(operations):
return (run_type, run_params)
def table_parser(table_name):
tmp = table_name.split("_")
def collection_parser(collection_name):
tmp = collection_name.split("_")
# if len(tmp) != 5:
# return None
data_type = tmp[0]
table_size_unit = tmp[1][-1]
table_size = tmp[1][0:-1]
if table_size_unit == "m":
table_size = int(table_size) * 1000000
elif table_size_unit == "b":
table_size = int(table_size) * 1000000000
collection_size_unit = tmp[1][-1]
collection_size = tmp[1][0:-1]
if collection_size_unit == "m":
collection_size = int(collection_size) * 1000000
elif collection_size_unit == "b":
collection_size = int(collection_size) * 1000000000
index_file_size = int(tmp[2])
dimension = int(tmp[3])
metric_type = str(tmp[4])
return (data_type, table_size, index_file_size, dimension, metric_type)
return (data_type, collection_size, index_file_size, dimension, metric_type)
def parse_ann_table_name(table_name):
data_type = table_name.split("_")[0]
dimension = int(table_name.split("_")[1])
metric = table_name.split("_")[-1]
# metric = table_name.attrs['distance']
# dimension = len(table_name["train"][0])
def parse_ann_collection_name(collection_name):
data_type = collection_name.split("_")[0]
dimension = int(collection_name.split("_")[1])
metric = collection_name.split("_")[-1]
# metric = collection_name.attrs['distance']
# dimension = len(collection_name["train"][0])
if metric == "euclidean":
metric_type = "l2"
elif metric == "angular":

View File

@ -1,5 +1,5 @@
numpy==1.16.3
pymilvus-test>=0.2.0
scipy==1.3.1
scikit-learn==0.19.1
h5py==2.7.1
# influxdb==5.2.2
@ -8,3 +8,4 @@ tableprint==0.8.0
ansicolors==1.1.8
scipy==1.3.1
kubernetes==10.0.1
# rq==1.2.0

View File

@ -4,6 +4,7 @@ import pdb
import time
import random
from multiprocessing import Process
from itertools import product
import numpy as np
import sklearn.preprocessing
from client import MilvusClient
@ -25,6 +26,7 @@ SIFT_SRC_DATA_DIR = '/test/milvus/raw_data/sift1b/'
DEEP_SRC_DATA_DIR = '/test/milvus/raw_data/deep1b/'
JACCARD_SRC_DATA_DIR = '/test/milvus/raw_data/jaccard/'
HAMMING_SRC_DATA_DIR = '/test/milvus/raw_data/jaccard/'
STRUCTURE_SRC_DATA_DIR = '/test/milvus/raw_data/jaccard/'
SIFT_SRC_GROUNDTRUTH_DATA_DIR = SIFT_SRC_DATA_DIR + 'gnd'
WARM_TOP_K = 1
@ -59,6 +61,8 @@ def gen_file_name(idx, dimension, data_type):
fname = JACCARD_SRC_DATA_DIR+fname
elif data_type == "hamming":
fname = HAMMING_SRC_DATA_DIR+fname
elif data_type == "sub" or data_type == "super":
fname = STRUCTURE_SRC_DATA_DIR+fname
return fname
@ -76,6 +80,8 @@ def get_vectors_from_binary(nq, dimension, data_type):
file_name = JACCARD_SRC_DATA_DIR+'query.npy'
elif data_type == "hamming":
file_name = HAMMING_SRC_DATA_DIR+'query.npy'
elif data_type == "sub" or data_type == "super":
file_name = STRUCTURE_SRC_DATA_DIR+'query.npy'
data = np.load(file_name)
vectors = data[0:nq].tolist()
return vectors
@ -92,7 +98,7 @@ class Runner(object):
X = X.astype(np.float32)
elif metric_type == "l2":
X = X.astype(np.float32)
elif metric_type == "jaccard" or metric_type == "hamming":
elif metric_type in ["jaccard", "hamming", "sub", "super"]:
tmp = []
for index, item in enumerate(X):
new_vector = bytes(np.packbits(item, axis=-1).tolist())
@ -100,11 +106,26 @@ class Runner(object):
X = tmp
return X
def do_insert(self, milvus, table_name, data_type, dimension, size, ni):
def generate_combinations(self, args):
    """Expand list- or dict-valued parameters into every combination.

    For a list input, each element that is itself a list contributes its
    values; scalars contribute themselves. Returns the cartesian product
    as a list of lists. For a dict input, list values are expanded per
    key and the product is returned as a list of dicts.

    Raises TypeError for any other input type.
    """
    if isinstance(args, list):
        normalized = [item if isinstance(item, list) else [item] for item in args]
        return [list(combo) for combo in product(*normalized)]
    if isinstance(args, dict):
        pairs_per_key = []
        for key, value in args.items():
            choices = value if isinstance(value, list) else [value]
            pairs_per_key.append([(key, choice) for choice in choices])
        return [dict(combo) for combo in product(*pairs_per_key)]
    raise TypeError("No args handling exists for %s" % type(args).__name__)
def do_insert(self, milvus, collection_name, data_type, dimension, size, ni):
'''
@params:
milvus: server connect instance
dimension: table dimensionn
dimension: collection dimension
# index_file_size: size trigger file merge
size: row count of vectors to be insert
ni: row count of vectors to be insert each time
@ -127,12 +148,12 @@ class Runner(object):
vectors_per_file = 10000
elif data_type == "sift":
vectors_per_file = SIFT_VECTORS_PER_FILE
elif data_type == "jaccard" or data_type == "hamming":
elif data_type in ["jaccard", "hamming", "sub", "super"]:
vectors_per_file = JACCARD_VECTORS_PER_FILE
else:
raise Exception("data_type: %s not supported" % data_type)
if size % vectors_per_file or ni > vectors_per_file:
raise Exception("Not invalid table size or ni")
raise Exception("Not invalid collection size or ni")
file_num = size // vectors_per_file
for i in range(file_num):
file_name = gen_file_name(i, dimension, data_type)
@ -150,6 +171,8 @@ class Runner(object):
logger.info("Start id: %s, end id: %s" % (start_id, end_id))
ids = [k for k in range(start_id, end_id)]
status, ids = milvus.insert(vectors, ids=ids)
# milvus.flush()
logger.debug(milvus.count())
ni_end_time = time.time()
total_time = total_time + ni_end_time - ni_start_time
@ -160,9 +183,9 @@ class Runner(object):
bi_res["ni_time"] = ni_time
return bi_res
def do_query(self, milvus, table_name, top_ks, nqs, nprobe, run_count=1):
def do_query(self, milvus, collection_name, top_ks, nqs, run_count=1, search_param=None):
bi_res = []
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
base_query_vectors = get_vectors_from_binary(MAX_NQ, dimension, data_type)
for nq in nqs:
tmp_res = []
@ -174,7 +197,7 @@ class Runner(object):
for i in range(run_count):
logger.info("Start run query, run %d of %s" % (i+1, run_count))
start_time = time.time()
query_res = milvus.query(vectors, top_k, nprobe)
query_res = milvus.query(vectors, top_k, search_param=search_param)
interval_time = time.time() - start_time
if (i == 0) or (min_query_time > interval_time):
min_query_time = interval_time
@ -183,12 +206,18 @@ class Runner(object):
bi_res.append(tmp_res)
return bi_res
def do_query_ids(self, milvus, table_name, top_k, nq, nprobe):
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
def do_query_qps(self, milvus, query_vectors, top_k, search_param):
    """Execute one search call and return its wall-clock latency in seconds."""
    begin = time.time()
    milvus.query(query_vectors, top_k, search_param)
    return time.time() - begin
def do_query_ids(self, milvus, collection_name, top_k, nq, search_param=None):
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
base_query_vectors = get_vectors_from_binary(MAX_NQ, dimension, data_type)
vectors = base_query_vectors[0:nq]
logger.info("Start query, query params: top-k: {}, nq: {}, actually length of vectors: {}".format(top_k, nq, len(vectors)))
query_res = milvus.query(vectors, top_k, nprobe)
query_res = milvus.query(vectors, top_k, search_param=search_param)
result_ids = []
result_distances = []
for result in query_res:
@ -201,12 +230,12 @@ class Runner(object):
result_distances.append(tmp_distance)
return result_ids, result_distances
def do_query_acc(self, milvus, table_name, top_k, nq, nprobe, id_store_name):
(data_type, table_size, index_file_size, dimension, metric_type) = parser.table_parser(table_name)
def do_query_acc(self, milvus, collection_name, top_k, nq, id_store_name, search_param=None):
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
base_query_vectors = get_vectors_from_binary(MAX_NQ, dimension, data_type)
vectors = base_query_vectors[0:nq]
logger.info("Start query, query params: top-k: {}, nq: {}, actually length of vectors: {}".format(top_k, nq, len(vectors)))
query_res = milvus.query(vectors, top_k, nprobe)
query_res = milvus.query(vectors, top_k, search_param=None)
# if file existed, cover it
if os.path.isfile(id_store_name):
os.remove(id_store_name)
@ -250,10 +279,10 @@ class Runner(object):
Implementation based on:
https://github.com/facebookresearch/faiss/blob/master/benchs/datasets.py
"""
def get_groundtruth_ids(self, table_size):
fname = GROUNDTRUTH_MAP[str(table_size)]
def get_groundtruth_ids(self, collection_size):
fname = GROUNDTRUTH_MAP[str(collection_size)]
fname = SIFT_SRC_GROUNDTRUTH_DATA_DIR + "/" + fname
a = np.fromfile(fname, dtype='int32')
d = a[0]
true_ids = a.reshape(-1, d + 1)[:, 1:].copy()
return true_ids
return true_ids

View File

@ -0,0 +1,53 @@
[
{
"server": "apollo",
"suite_params": [
{
"suite": "cpu_accuracy_ann.yaml",
"image_type": "cpu"
}
]
},
{
"server": "poseidon",
"suite_params": [
{
"suite": "gpu_search_performance.yaml",
"image_type": "gpu"
},
{
"suite": "cpu_search_performance.yaml",
"image_type": "cpu"
},
{
"suite": "insert_performance.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_accuracy.yaml",
"image_type": "gpu"
}
]
},
{
"server": "eros",
"suite_params": [
{
"suite": "gpu_accuracy_ann.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_search_stability.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_build_performance.yaml",
"image_type": "gpu"
},
{
"suite": "cpu_build_performance.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,29 @@
[
{
"server": "apollo",
"suite_params": [
{
"suite": "cpu_accuracy_ann.yaml",
"image_type": "cpu"
}
]
},
{
"server": "poseidon",
"suite_params": [
{
"suite": "cpu_search_performance.yaml",
"image_type": "cpu"
}
]
},
{
"server": "eros",
"suite_params": [
{
"suite": "cpu_build_performance.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,58 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "gpu_accuracy.yaml",
"image_type": "gpu"
}
]
},
{
"server": "poseidon",
"suite_params": [
{
"suite": "070_gpu_search.yaml",
"image_type": "gpu"
},
{
"suite": "070_cpu_search.yaml",
"image_type": "cpu"
},
{
"suite": "070_gpu_build.yaml",
"image_type": "gpu"
},
{
"suite": "cpu_accuracy.yaml",
"image_type": "cpu"
}
]
},
{
"server": "apollo",
"suite_params": [
{
"suite": "cpu_accuracy_ann.yaml",
"image_type": "cpu"
},
{
"suite": "070_cpu_search_stability.yaml",
"image_type": "cpu"
}
]
},
{
"server": "eros",
"suite_params": [
{
"suite": "070_gpu_stability.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_accuracy_ann.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "gpu_accuracy_ann.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,63 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "080_gpu_accuracy.yaml",
"image_type": "gpu"
}
]
},
{
"server": "poseidon",
"suite_params": [
{
"suite": "080_gpu_search.yaml",
"image_type": "gpu"
},
{
"suite": "080_cpu_search.yaml",
"image_type": "cpu"
},
{
"suite": "080_gpu_build.yaml",
"image_type": "gpu"
},
{
"suite": "080_cpu_accuracy.yaml",
"image_type": "cpu"
},
{
"suite": "080_cpu_build.yaml",
"image_type": "cpu"
}
]
},
{
"server": "apollo",
"suite_params": [
{
"suite": "cpu_accuracy_ann.yaml",
"image_type": "cpu"
},
{
"suite": "080_cpu_search_stability.yaml",
"image_type": "cpu"
}
]
},
{
"server": "eros",
"suite_params": [
{
"suite": "gpu_accuracy_ann.yaml",
"image_type": "gpu"
},
{
"suite": "080_gpu_stability.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,15 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "crud_add.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_accuracy_sift1m.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "eros",
"suite_params": [
{
"suite": "070_ann.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,15 @@
[
{
"server": "apollo",
"suite_params": [
{
"suite": "070_insert_10m.yaml",
"image_type": "cpu"
},
{
"suite": "070_cpu_build.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "add_flush_performance.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "debug_build.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "clean.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "crud_add.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,12 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "070_gpu_build.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "apollo",
"suite_params": [
{
"suite": "crud_add_flush.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "crud_search.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "eros",
"suite_params": [
{
"suite": "070_stability.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "debug.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,53 @@
[
{
"server": "apollo",
"suite_params": [
{
"suite": "cpu_accuracy_ann.yaml",
"image_type": "cpu"
}
]
},
{
"server": "poseidon",
"suite_params": [
{
"suite": "gpu_search_performance.yaml",
"image_type": "gpu"
},
{
"suite": "cpu_search_performance.yaml",
"image_type": "cpu"
},
{
"suite": "insert_performance.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_accuracy.yaml",
"image_type": "gpu"
}
]
},
{
"server": "eros",
"suite_params": [
{
"suite": "gpu_accuracy_ann.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_search_stability.yaml",
"image_type": "gpu"
},
{
"suite": "gpu_build_performance.yaml",
"image_type": "gpu"
},
{
"suite": "cpu_build_performance.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "eros",
"suite_params": [
{
"suite": "080_gpu_search_id.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "file_size.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "cpu_accuracy_ann_crud_debug.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "insert_performance.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "cpu_search_performance_jaccard.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,27 @@
[
{
"server": "poseidon",
"suite_params": [
{
"suite": "080_gpu_search.yaml",
"image_type": "gpu"
},
{
"suite": "080_cpu_search.yaml",
"image_type": "cpu"
},
{
"suite": "080_gpu_build.yaml",
"image_type": "gpu"
},
{
"suite": "080_cpu_accuracy.yaml",
"image_type": "cpu"
},
{
"suite": "080_cpu_build.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "eros",
"suite_params": [
{
"suite": "debug.yaml",
"image_type": "cpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "070_gpu_search.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "athena",
"suite_params": [
{
"suite": "insert_performance_sift1b.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -0,0 +1,11 @@
[
{
"server": "eros",
"suite_params": [
{
"suite": "gpu_search_stability.yaml",
"image_type": "gpu"
}
]
}
]

View File

@ -1,57 +0,0 @@
accuracy:
# interface: search_vectors
query:
[
{
"dataset": "random_50m_1024_512_ip",
"index.index_types": ["flat", "ivf_flat"],
"index.nlists": [16384],
"index.metric_types": ["ip"],
"nprobes": [8, 64, 128, 512, 2048],
"top_ks": [64],
"nqs": [1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 120,
"server.gpu_cache_capacity": 6,
"server.enable_gpu": True,
"server.resources": ["gpu0", "gpu1"],
"db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip_ivf",
"sift_acc": false
},
{
"dataset": "random_50m_1024_512_ip",
"index.index_types": ["flat", "ivf_sq8"],
"index.nlists": [16384],
"index.metric_types": ["ip"],
"nprobes": [8, 64, 128, 512, 2048],
"top_ks": [64],
"nqs": [1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 120,
"server.gpu_cache_capacity": 6,
"server.enable_gpu": True,
"server.resources": ["gpu0", "gpu1"],
"db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip_sq8",
"sift_acc": false
},
{
"dataset": "random_50m_1024_512_ip",
"index.index_types": ["flat", "ivf_sq8h"],
"index.nlists": [16384],
"index.metric_types": ["ip"],
"nprobes": [8, 64, 128, 512, 2048],
"top_ks": [64],
"nqs": [1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 120,
"server.gpu_cache_capacity": 6,
"server.enable_gpu": True,
"server.resources": ["gpu0", "gpu1"],
"db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip",
"sift_acc": false
},
]

View File

@ -1,75 +0,0 @@
accuracy:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 16, 32, 64, 128, 256],
# "top_ks": [64],
# "nqs": [1000],
# "server.cpu_cache_capacity": 50,
# "server.use_gpu_threshold": 1001,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_sq8h",
# "sift_acc": true
# },
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 16, 32, 64, 128, 256],
# "top_ks": [64],
# "nqs": [1000],
# "server.cpu_cache_capacity": 50,
# "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_sq8",
# "sift_acc": true
# },
{
"dataset": "sift_10m_1024_128_l2",
"index.index_types": ["ivf_pq"],
"index.nlists": [16384],
"index.metric_types": ["l2"],
"nprobes": [1, 8, 16, 32, 64, 128, 256],
"top_ks": [64],
"nqs": [1000],
"server.cpu_cache_capacity": 50,
"server.resources": ["cpu", "gpu0"],
"db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_sq8",
"sift_acc": true
},
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["ivf_flat"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 16, 32, 64, 128, 256],
# "top_ks": [64],
# "nqs": [1000],
# "server.cpu_cache_capacity": 100,
# "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_ivf",
# "sift_acc": true
# },
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["nsg"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1],
# "top_ks": [64],
# "nqs": [1000],
# # "server.cpu_cache_capacity": 100,
# # "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data_gpu/sift_10m_1024_128_l2_nsg",
# "sift_acc": true
# },
]

View File

@ -1,73 +0,0 @@
accuracy:
# interface: search_vectors
query:
[
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# # "nprobes": [1, 8, 32, 64, 128],
# "nprobes": [1, 8, 32, 64, 128, 256],
# "top_ks": [64],
# "nqs": [1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 1001,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data_gpu/sift_1b_2048_128_l2",
# "sift_acc": true
# },
{
"dataset": "sift_1b_2048_128_l2",
"index.index_types": ["ivf_pq"],
"index.nlists": [16384],
"index.metric_types": ["l2"],
"nprobes": [1, 2, 4, 8, 16, 32, 64, 128, 256, 512],
"top_ks": [64],
"nqs": [1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 10001,
"server.cpu_cache_capacity": 150,
"server.gpu_cache_capacity": 6,
"server.resources": ["gpu0", "gpu1"],
"db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2_pq",
"sift_acc": true
},
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 32, 64, 128],
# "top_ks": [64],
# "nqs": [1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2",
# "sift_acc": true
# },
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 32, 64, 128],
# "top_ks": [64],
# "nqs": [1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["cpu", "gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2",
# "sift_acc": true
# },
]

View File

@ -1,21 +0,0 @@
accuracy:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
{
"dataset": "sift_500m_1024_128_l2",
"index.index_types": ["ivf_sq8h"],
"index.nlists": [16384],
"index.metric_types": ["l2"],
"nprobes": [1, 8, 16, 32, 64, 128, 256],
"top_ks": [64],
"nqs": [1000],
"server.cpu_cache_capacity": 200,
"server.resources": ["cpu", "gpu0"],
"db_path_prefix": "/test/milvus/db_data/sift_500m_1024_128_l2",
"sift_acc": true
},
]

View File

@ -1,76 +0,0 @@
accuracy:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 16, 32, 64, 128, 256],
# "top_ks": [64],
# "nqs": [1000],
# "server.cpu_cache_capacity": 50,
# "server.use_gpu_threshold": 1001,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data/sift_50m_1024_128_l2_sq8h",
# "sift_acc": true
# },
{
"dataset": "sift_50m_1024_128_l2",
"index.index_types": ["ivf_sq8"],
"index.nlists": [16384],
"index.metric_types": ["l2"],
"nprobes": [1, 8, 16, 32, 64, 128, 256],
"top_ks": [64],
"nqs": [1000],
"server.cpu_cache_capacity": 50,
"server.resources": ["cpu", "gpu0"],
"db_path_prefix": "/test/milvus/db_data/sift_50m_1024_128_l2_sq8",
"sift_acc": true
},
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["ivf_flat"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 16, 32, 64, 128, 256],
# "top_ks": [64],
# "nqs": [1000],
# "server.cpu_cache_capacity": 100,
# "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data/sift_50m_1024_128_l2_ivf",
# "sift_acc": true
# },
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["ivf_flat"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1, 8, 16, 32, 64, 128, 256],
# "top_ks": [64],
# "nqs": [1000],
# "server.cpu_cache_capacity": 100,
# "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf",
# "sift_acc": true
# },
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["nsg"],
# "index.nlists": [16384],
# "index.metric_types": ["l2"],
# "nprobes": [1],
# "top_ks": [64],
# "nqs": [1000],
# "server.cpu_cache_capacity": 100,
# "server.resources": ["gpu0"],
# "db_path_prefix": "/test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg",
# "sift_acc": true
# },
]

View File

@ -1,119 +0,0 @@
accuracy:
tables:
# sift1b
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
# sift50m
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf
cache_config.cpu_cache_capacity: 60
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg
cache_config.cpu_cache_capacity: 100
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1]
top_ks: [64]
nqs: [1000]

View File

@ -1,5 +1,78 @@
ann_accuracy:
tables:
collections:
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
collection_name: sift_128_euclidean
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8']
index_params:
nlist: [16384]
top_ks: [10]
nqs: [10000]
search_params:
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
collection_name: sift_128_euclidean
index_file_sizes: [1024]
index_types: ['ivf_pq']
index_params:
nlist: [16384]
m: [32]
top_ks: [10]
nqs: [10000]
search_params:
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
collection_name: sift_128_euclidean
index_file_sizes: [1024]
index_types: ['annoy']
index_params:
n_trees: [8, 32]
top_ks: [10]
nqs: [10000]
search_params:
search_k: [50, 100, 500, 1000]
-
server:
cache_config.cpu_cache_capacity: 16
@ -14,42 +87,23 @@ ann_accuracy:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
table_name: sift_128_euclidean
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_pq', 'nsg']
nlists: [16384]
collection_name: sift_128_euclidean
index_file_sizes: [256]
index_types: ['hnsw']
index_params:
M: [16]
efConstruction: [500]
top_ks: [10]
nqs: [10000]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [10]
nqs: [10000]
ef: [16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/gist-960-euclidean.hdf5
table_name: gist_960_euclidean
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_pq', 'nsg']
nlists: [16384]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [10]
nqs: [10000]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
@ -58,11 +112,62 @@ ann_accuracy:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
table_name: glove_200_angular
collection_name: glove_200_angular
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_pq', 'nsg']
nlists: [16384]
index_types: ['flat', 'ivf_flat', 'ivf_sq8']
index_params:
nlist: [16384]
top_ks: [10]
nqs: [10000]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [10]
nqs: [10000]
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
collection_name: glove_200_angular
index_file_sizes: [1024]
index_types: ['ivf_pq']
index_params:
nlist: [16384]
m: [20]
top_ks: [10]
nqs: [10000]
search_params:
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
collection_name: glove_200_angular
index_file_sizes: [256]
index_types: ['hnsw']
index_params:
M: [36]
efConstruction: [500]
top_ks: [10]
nqs: [10000]
search_params:
ef: [10, 16, 32, 64, 128, 256, 512]

View File

@ -1,36 +0,0 @@
build_performance:
tables:
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096
# cache_config.cpu_cache_capacity: 32
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: false
# gpu_resource_config.cache_capacity: 6
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# index_type: ivf_sq8
# nlist: 4096
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192
cache_config.cpu_cache_capacity: 32
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 6
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
index_type: ivf_sq8
nlist: 8192

View File

@ -1,169 +0,0 @@
search_performance:
tables:
# sift_50m
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf
cache_config.cpu_cache_capacity: 32
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
cache_config.cpu_cache_capacity: 32
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg
cache_config.cpu_cache_capacity: 50
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
# random_50m
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_ivf
cache_config.cpu_cache_capacity: 110
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: random_50m_1024_512_ip
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: random_50m_1024_512_ip
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_nsg
cache_config.cpu_cache_capacity: 200
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: random_50m_1024_512_ip
run_count: 2
search_params:
nprobes: [8]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
# sift_1b
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_1b_2048_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_1b_2048_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,20 +0,0 @@
search_stability:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
cache_config.cpu_cache_capacity: 50
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 100
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
during_time: 240
search_params:
nprobes: 1-200
top_ks: 1-200
nqs: 1-200

View File

@ -1,27 +0,0 @@
stability:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192_stability
cache_config.cpu_cache_capacity: 64
cache_config.cache_insert_data: true
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 100
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
during_time: 480
search_params:
nprobes: 1-200
top_ks: 1-200
nqs: 1-200
    # number of vectors inserted per insert operation
insert_xb: 100000
    # perform an insert after every 4 search iterations
insert_interval: 4

View File

@ -1,157 +0,0 @@
accuracy:
tables:
# sift1b
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
# cache_config.cpu_cache_capacity: 150
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_1b_2048_128_l2
# search_params:
# nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
# top_ks: [64]
# nqs: [1000]
# sift50m
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf
cache_config.cpu_cache_capacity: 60
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
# cache_config.cpu_cache_capacity: 30
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# search_params:
# nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
# top_ks: [64]
# nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg
cache_config.cpu_cache_capacity: 100
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1]
top_ks: [64]
nqs: [1000]

View File

@ -1,5 +1,5 @@
ann_accuracy:
tables:
collections:
-
server:
cache_config.cpu_cache_capacity: 16
@ -14,14 +14,16 @@ ann_accuracy:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
table_name: sift_128_euclidean
collection_name: sift_128_euclidean
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h', 'ivf_pq', 'nsg']
nlists: [16384]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h']
index_params:
nlist: [16384]
top_ks: [10]
nqs: [10000]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [10]
nqs: [10000]
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
@ -35,15 +37,67 @@ ann_accuracy:
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/gist-960-euclidean.hdf5
table_name: gist_960_euclidean
source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
collection_name: sift_128_euclidean
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h', 'ivf_pq', 'nsg']
nlists: [16384]
index_types: ['ivf_pq']
index_params:
nlist: [16384]
m: [32]
top_ks: [10]
nqs: [10000]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [10]
nqs: [10000]
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5
collection_name: sift_128_euclidean
index_file_sizes: [256]
index_types: ['hnsw']
index_params:
M: [16]
efConstruction: [500]
top_ks: [10]
nqs: [10000]
search_params:
ef: [16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
collection_name: glove_200_angular
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h']
index_params:
nlist: [16384]
top_ks: [10]
nqs: [10000]
search_params:
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
@ -58,11 +112,61 @@ ann_accuracy:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5
table_name: glove_200_angular
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h', 'nsg']
nlists: [16384]
collection_name: glove_200_angular
index_file_sizes: [256]
index_types: ['hnsw']
index_params:
M: [36]
efConstruction: [500]
top_ks: [10]
nqs: [10000]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [10]
nqs: [10000]
ef: [10, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/kosarak-27983-jaccard.hdf5
collection_name: kosarak_27984_jaccard
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat']
index_params:
nlist: [2048]
top_ks: [10]
nqs: [10000]
search_params:
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-256-hamming.hdf5
collection_name: sift_256_hamming
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat']
index_params:
nlist: [2048]
top_ks: [100]
nqs: [1000]
search_params:
nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

View File

@ -1,47 +0,0 @@
ann_accuracy:
tables:
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/kosarak-27983-jaccard.hdf5
table_name: kosarak_27984_jaccard
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat']
nlists: [2048]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [10]
nqs: [10000]
-
server:
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
source_file: /test/milvus/ann_hdf5/sift-256-hamming.hdf5
table_name: sift_256_hamming
index_file_sizes: [1024]
index_types: ['flat', 'ivf_flat']
nlists: [2048]
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [100]
nqs: [1000]

View File

@ -1,59 +0,0 @@
accuracy:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]

View File

@ -1,40 +0,0 @@
accuracy:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1m_1024_128_l2_ivf
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1m_1024_128_l2_sq8
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
top_ks: [64]
nqs: [1000]

View File

@ -1,80 +0,0 @@
accuracy:
tables:
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
# cache_config.cpu_cache_capacity: 30
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# search_params:
# nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
# top_ks: [64]
# nqs: [1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096
# cache_config.cpu_cache_capacity: 30
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# search_params:
# nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
# top_ks: [64]
# nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192]
top_ks: [64]
nqs: [1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
search_params:
nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
top_ks: [64]
nqs: [1000]

View File

@ -1,36 +0,0 @@
build_performance:
tables:
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8h
# cache_config.cpu_cache_capacity: 16
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: random_50m_1024_512_ip
# index_type: ivf_sq8h
# nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096
cache_config.cpu_cache_capacity: 32
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 6
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
index_type: ivf_sq8
nlist: 4096

View File

@ -1,36 +0,0 @@
build_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/hamming_50m_128_512_hamming_ivf
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: hamming_50m_128_512_hamming
index_type: ivf_flat
nlist: 2048
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/hamming_50m_128_512_hamming_flat
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: hamming_50m_128_512_hamming
index_type: flat
nlist: 2048

View File

@ -1,36 +0,0 @@
build_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/jaccard_50m_128_512_jaccard_ivf
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: jaccard_50m_128_512_jaccard
index_type: ivf_flat
nlist: 2048
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/jaccard_50m_128_512_jaccard_flat
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: jaccard_50m_128_512_jaccard
index_type: flat
nlist: 2048

View File

@ -1,247 +0,0 @@
search_performance:
tables:
# sift_50m
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf
cache_config.cpu_cache_capacity: 32
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
# cache_config.cpu_cache_capacity: 32
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg
cache_config.cpu_cache_capacity: 50
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
# random_50m
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_ivf
cache_config.cpu_cache_capacity: 110
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: random_50m_1024_512_ip
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: random_50m_1024_512_ip
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8h
cache_config.cpu_cache_capacity: 30
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: random_50m_1024_512_ip
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_nsg
cache_config.cpu_cache_capacity: 200
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 6
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: random_50m_1024_512_ip
run_count: 2
search_params:
nprobes: [8]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
# sift_1b
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
# cache_config.cpu_cache_capacity: 150
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_1b_2048_128_l2
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,22 +0,0 @@
search_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/hamming_50m_128_512_hamming_ivf
cache_config.cpu_cache_capacity: 32
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: hamming_50m_128_512_hamming
run_count: 1
search_params:
nprobes: [8, 32]
top_ks: [1, 16, 64, 128, 256, 512, 1000]
nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,22 +0,0 @@
search_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/jaccard_50m_128_512_jaccard_ivf
cache_config.cpu_cache_capacity: 32
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: jaccard_50m_128_512_jaccard
run_count: 1
search_params:
nprobes: [8, 32]
top_ks: [1, 16, 64, 128, 256, 512, 1000]
nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,82 +0,0 @@
search_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/random_50m_2048_512_ip_sq8
cache_config.cpu_cache_capacity: 110
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: random_50m_2048_512_ip
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8
# cache_config.cpu_cache_capacity: 30
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
#        table_name: random_50m_1024_512_ip
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8h
# cache_config.cpu_cache_capacity: 30
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: random_50m_1024_512_ip
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_nsg
# cache_config.cpu_cache_capacity: 200
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: random_50m_1024_512_ip
# run_count: 2
# search_params:
# nprobes: [8]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,62 +0,0 @@
search_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq
cache_config.cpu_cache_capacity: 150
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1b_2048_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,42 +0,0 @@
search_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1m_1024_128_l2_ivf
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1m_1024_128_l2
run_count: 1
search_params:
nprobes: [8, 32]
top_ks: [1, 16, 64, 128, 256, 512, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1m_1024_128_l2_sq8
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1m_1024_128_l2
run_count: 1
search_params:
nprobes: [8, 32]
top_ks: [1, 16, 64, 128, 256, 512, 1000]
nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,146 +0,0 @@
search_performance:
tables:
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf
# cache_config.cpu_cache_capacity: 32
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
# cache_config.cpu_cache_capacity: 16
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h
# cache_config.cpu_cache_capacity: 16
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
# git issue num: #626
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq
# cache_config.cpu_cache_capacity: 32
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# run_count: 2
# search_params:
# nprobes: [8, 32]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg
# cache_config.cpu_cache_capacity: 50
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 200
# gpu_resource_config.enable: true
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# - gpu1
# gpu_resource_config.build_index_resources:
# - gpu0
# - gpu1
# table_name: sift_50m_1024_128_l2
# run_count: 2
# search_params:
# nprobes: [8]
# top_ks: [1, 10, 100, 1000]
# nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096
cache_config.cpu_cache_capacity: 16
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 200
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
run_count: 2
search_params:
nprobes: [8, 32]
top_ks: [1, 10, 100, 1000]
nqs: [1, 10, 100, 200, 500, 1000]

View File

@ -1,23 +0,0 @@
search_stability:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8
cache_config.cpu_cache_capacity: 50
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 100
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
- gpu2
- gpu3
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
during_time: 240
search_params:
nprobes: 1-200
top_ks: 1-200
nqs: 1-200

View File

@ -1,26 +0,0 @@
stability:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1m_1024_128_l2_ivf_stability
cache_config.cpu_cache_capacity: 64
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 100
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1m_1024_128_l2
during_time: 10
search_params:
nprobes: 1-200
top_ks: 1-200
nqs: 1-200
# length of insert vectors
insert_xb: 10000
# insert after search 3 times
insert_interval: 3

View File

@ -1,27 +0,0 @@
stability:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h_stability
cache_config.cpu_cache_capacity: 64
cache_config.cache_insert_data: true
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 100
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_50m_1024_128_l2
during_time: 480
search_params:
nprobes: 1-200
top_ks: 1-200
nqs: 1-200
# length of insert vectors
insert_xb: 100000
# insert after search 4 times
insert_interval: 4

View File

@ -1,32 +0,0 @@
performance:
# interface: add_vectors
insert:
# index_type: flat/ivf_flat/ivf_sq8/mix_nsg
[
# debug
# data_type / data_size / index_file_size / dimension
# data_type: random / ann_sift
# data_size: 10m / 1b
# {
# "table_name": "random_1m_1024_4096_ip",
# "ni_per": 10000,
# "server.cpu_cache_capacity": 16,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_1m_1024_4096_ip"
# },
# {
# "table_name": "random_1m_1024_16384_ip",
# "ni_per": 10000,
# "server.cpu_cache_capacity": 16,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_1m_1024_16384_ip"
# },
{
"table_name": "random_50m_1024_512_ip",
"ni_per": 100000,
# "server.cpu_cache_capacity": 16,
# "server.resources": ["gpu0", "gpu1"],
"db_path_prefix": "/test/milvus/db_data_cpu/random_50m_1024_512_ip_ivf"
},
]

View File

@ -1,19 +0,0 @@
insert_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_5m_512_128_l2_ivf
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_5m_512_128_l2
ni_per: 100000
build_index: false
# index_type: ivf_flat
# nlist: 16384

View File

@ -1,87 +0,0 @@
insert_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_ivf
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: deep_1b_1024_96_ip
ni_per: 100000
build_index: false
# index_type: ivf_flat
# nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_sq8
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: deep_1b_1024_96_ip
ni_per: 100000
build_index: false
# index_type: ivf_sq8
# nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_sq8h
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: deep_1b_1024_96_ip
ni_per: 100000
build_index: false
# index_type: ivf_sq8h
# nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_pq
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: deep_1b_1024_96_ip
ni_per: 100000
build_index: false
# index_type: ivf_pq
# nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_nsg
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: deep_1b_1024_96_ip
ni_per: 100000
build_index: false
# index_type: nsg
# nlist: 16384

View File

@ -1,36 +0,0 @@
insert_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/hamming_50m_128_512_hamming_ivf
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: hamming_50m_128_512_hamming
ni_per: 100000
build_index: false
# index_type: ivf_flat
# nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/hamming_50m_128_512_hamming_flat
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: hamming_50m_128_512_hamming
ni_per: 100000
build_index: false
# index_type: ivf_flat
# nlist: 16384

View File

@ -1,36 +0,0 @@
insert_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/jaccard_50m_128_512_jaccard_ivf
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: jaccard_50m_128_512_jaccard
ni_per: 100000
build_index: false
# index_type: ivf_flat
# nlist: 16384
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/jaccard_50m_128_512_jaccard_flat
# cache_config.cpu_cache_capacity: 8
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: false
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# gpu_resource_config.build_index_resources:
# - gpu0
# table_name: jaccard_50m_128_512_jaccard
# ni_per: 100000
# build_index: false
# # index_type: ivf_flat
# # nlist: 16384

View File

@ -1,87 +0,0 @@
insert_performance:
tables:
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_ip_ivf
# cache_config.cpu_cache_capacity: 8
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: false
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# gpu_resource_config.build_index_resources:
# - gpu0
# table_name: sift_50m_1024_128_ip
# ni_per: 100000
# build_index: false
# # index_type: ivf_flat
# # nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
ni_per: 100000
build_index: false
# index_type: ivf_sq8
# nlist: 16384
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: false
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
gpu_resource_config.build_index_resources:
- gpu0
table_name: sift_50m_1024_128_l2
ni_per: 100000
build_index: false
#      index_type: ivf_sq8
# nlist: 16384
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_ip_pq
# cache_config.cpu_cache_capacity: 8
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: false
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# gpu_resource_config.build_index_resources:
# - gpu0
# table_name: sift_50m_1024_128_ip
# ni_per: 100000
# build_index: false
# # index_type: ivf_pq
# # nlist: 16384
# -
# server:
# db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_ip_nsg
# cache_config.cpu_cache_capacity: 8
# engine_config.use_blas_threshold: 1100
# engine_config.gpu_search_threshold: 1
# gpu_resource_config.enable: false
# gpu_resource_config.cache_capacity: 4
# gpu_resource_config.search_resources:
# - gpu0
# gpu_resource_config.build_index_resources:
# - gpu0
# table_name: sift_50m_1024_128_ip
# ni_per: 100000
# build_index: false
# # index_type: nsg
# # nlist: 16384

View File

@ -1,35 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "random_1m_1024_4096_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 1100,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["cpu", "gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_1m_1024_4096_ip_sq8h"
# },
{
"dataset": "random_2m_1024_512_ip",
"index.index_types": ["mix_nsg"],
"index.nlists": [16384],
"nprobes": [8],
"top_ks": [1],
"nqs": [1],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 1100,
"server.cpu_cache_capacity": 50,
"server.resources": ["gpu0", "gpu1"],
"server.enable_gpu": True,
"db_path_prefix": "/test/milvus/db_data/random_1m_1024_4096_ip_sq8"
},
]

View File

@ -1,177 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["cpu", "gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8h"
# },
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 1000,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["cpu", "gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8h"
# },
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 10000,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8h"
# },
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8"
# },
{
"dataset": "random_50m_2048_512_ip",
"index.index_types": ["ivf_sq8"],
"index.nlists": [16384],
"nprobes": [8],
"top_ks": [64],
"nqs": [1, 10, 100],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 50,
"server.enable_gpu": True,
"server.resources": ["gpu0", "gpu1"],
"db_path_prefix": "/test/milvus/db_data_cpu/random_50m_2048_512_ip_sq8"
},
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 1000,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 10000,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_pq"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 110,
# "server.gpu_cache_capacity": 6,
# "server.index_build_device": "gpu0",
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip_pq"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_flat"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 110,
# "server.gpu_cache_capacity": 6,
# "server.index_build_device": "gpu0",
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip_ivf"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["nsg"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 110,
# "server.gpu_cache_capacity": 6,
# "server.index_build_device": "gpu0",
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip_nsg"
# }
]

View File

@ -1,83 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["ivf_flat"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0"],
# "server.enable_gpu": False,
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_10m_1024_128_l2_ivf"
# },
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0"],
# "server.enable_gpu": False,
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_10m_1024_128_l2_sq8"
# },
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0"],
# "server.enable_gpu": False,
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_10m_1024_128_l2_sq8h"
# },
{
"dataset": "sift_10m_1024_128_l2",
"index.index_types": ["ivf_pq"],
"index.nlists": [16384],
"nprobes": [8, 32],
"top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
"nqs": [1, 10, 100, 500, 800, 1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 50,
"server.gpu_cache_capacity": 6,
"server.resources": ["gpu0"],
"server.enable_gpu": False,
"db_path_prefix": "/test/milvus/db_data_cpu/sift_10m_1024_128_l2_pq"
},
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["nsg"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0"],
# "server.enable_gpu": False,
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_10m_1024_128_l2_nsg"
# },
]

View File

@ -1,123 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 10000,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2_sq8h"
# },
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2_sq8h"
# },
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["cpu", "gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2_sq8h"
# },
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 1000,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2_sq8h"
# },
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 10000,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": False,
# "db_path_prefix": "/test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h"
# },
{
"dataset": "sift_1b_2048_128_l2",
"index.index_types": ["ivf_pq"],
"index.nlists": [16384],
"nprobes": [8, 32],
"top_ks": [1, 16, 64, 128, 256, 512, 1000],
"nqs": [1, 10, 100, 200, 500, 1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 10000,
"server.cpu_cache_capacity": 150,
"server.gpu_cache_capacity": 6,
"server.resources": ["gpu0", "gpu1"],
"server.enable_gpu": False,
"db_path_prefix": "/test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq"
},
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": False,
# "db_path_prefix": "/test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8"
# },
# {
# "dataset": "sift_1b_2048_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["cpu", "gpu0"],
# "db_path_prefix": "/test/milvus/db_data/sift_1b_2048_128_l2"
# },
]

View File

@ -1,23 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
{
"dataset": "sift_1m_1024_128_l2",
"index.index_types": ["mix_nsg"],
"index.nlists": [16384],
"nprobes": [8, 32],
"top_ks": [1, 16, 64, 128, 256, 512, 1000],
"nqs": [1, 10, 100, 200, 500, 1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 50,
"server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
"server.enable_gpu": True,
"db_path_prefix": "/test/milvus/db_data/sift_1m_1024_128_l2_nsg"
},
]

View File

@ -1,50 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "sift_500m_1024_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_500m_1024_128_l2_sq8"
# },
# {
# "dataset": "sift_500m_1024_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 150,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/sift_500m_1024_128_l2_sq8"
# },
{
"dataset": "sift_500m_1024_128_l2",
"index.index_types": ["ivf_sq8h"],
"index.nlists": [16384],
"nprobes": [8, 32],
"top_ks": [1, 100, 200, 1000],
"nqs": [1, 10, 100, 500, 1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 150,
"server.gpu_cache_capacity": 6,
"server.resources": ["gpu0", "gpu1"],
"db_path_prefix": "/test/milvus/db_data/sift_500m_1024_128_l2"
},
]

View File

@ -1,82 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
{
"dataset": "sift_50m_1024_128_l2",
"index.index_types": ["nsg"],
"index.nlists": [16384],
"nprobes": [8],
"top_ks": [64],
"nqs": [1, 10, 100],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 50,
"server.gpu_cache_capacity": 6,
"server.resources": ["gpu0", "gpu1"],
"server.enable_gpu": True,
"db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_nsg"
},
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["ivf_flat"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_ivf"
# },
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 3,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h"
# },
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_sq8"
# },
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["ivf_pq"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 200, 500, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.gpu_cache_capacity": 6,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data_cpu/sift_50m_1024_128_l2_pq"
# },
]

View File

@ -1,22 +0,0 @@
search_performance:
tables:
-
server:
db_config.primary_path: /test/milvus/db_data_gpu/sift_1m_1024_128_l2_ivf
cache_config.cpu_cache_capacity: 8
engine_config.use_blas_threshold: 1100
engine_config.gpu_search_threshold: 1
gpu_resource_config.enable: true
gpu_resource_config.cache_capacity: 4
gpu_resource_config.search_resources:
- gpu0
- gpu1
gpu_resource_config.build_index_resources:
- gpu0
- gpu1
table_name: sift_1m_1024_128_l2
run_count: 3
search_params:
nprobes: [8, 32]
top_ks: [1, 16]
nqs: [1, 10]

View File

@ -1,161 +0,0 @@
performance:
# interface: search_vectors
query:
# dataset: table name you have already created
# key starts with "server." need to reconfig and restart server, including use_blas_threshold/cpu_cache_capacity ..
[
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["cpu", "gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8h"
# },
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 1000,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["cpu", "gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8h"
# },
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 10000,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8h"
# },
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_2048_512_ip_sq8"
# },
# {
# "dataset": "random_50m_2048_512_ip",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 50,
# "server.enable_gpu": True,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data_cpu/random_50m_2048_512_ip_sq8"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 1000,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8h"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 10000,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip"
# },
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [8, 32],
# "top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
# "nqs": [1, 10, 100, 500, 800, 1000],
# "server.use_blas_threshold": 1100,
# "server.cpu_cache_capacity": 110,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8"
# },
{
"dataset": "random_50m_1024_512_ip",
"index.index_types": ["ivf_pq"],
"index.nlists": [16384],
"nprobes": [8, 32],
"top_ks": [1, 4, 16, 64, 128, 256, 512, 1000],
"nqs": [1, 10, 100, 500, 800, 1000],
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.cpu_cache_capacity": 110,
"server.gpu_cache_capacity": 6,
"server.index_build_device": "gpu0",
"server.resources": ["gpu0", "gpu1"],
"server.enable_gpu": True,
"db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip_pq"
},
# {
# "dataset": "random_50m_1024_512_ip",
# "index.index_types": ["ivf_flat"],
# "index.nlists": [16384],
# "nprobes": [8],
# "top_ks": [8],
# "nqs": [500],
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.cpu_cache_capacity": 110,
# "server.gpu_cache_capacity": 6,
# "server.index_build_device": "gpu0",
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": True,
# "db_path_prefix": "/test/milvus/db_data/random_50m_1024_512_ip_ivf"
# }
]

View File

@ -1,51 +0,0 @@
accuracy:
# interface: search_vectors
query:
[
# {
# "dataset": "random_20m_1024_512_ip",
# # index info
# "index.index_types": ["flat", "ivf_sq8"],
# "index.nlists": [16384],
# "index.metric_types": ["ip"],
# "nprobes": [1, 16, 64],
# "top_ks": [64],
# "nqs": [100],
# "server.cpu_cache_capacity": 100,
# "server.resources": ["gpu0"],
# "db_path_prefix": "/test/milvus/db_data/random_20m_1024_512_ip",
# },
# {
# "dataset": "sift_1m_1024_128_l2",
# "index.index_types": ["flat", "ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [1, 32, 128, 256, 512],
# "nqs": 10,
# "top_ks": 10,
# "server.use_blas_threshold": 1100,
# "server.cpu_cache_capacity": 16,
# },
# {
# "dataset": "sift_10m_1024_128_l2",
# "index.index_types": ["flat", "ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [1, 32, 128, 256, 512],
# "nqs": 10,
# "top_ks": 10,
# "server.use_blas_threshold": 1100,
# "server.cpu_cache_capacity": 32,
# },
# {
# "dataset": "sift_50m_1024_128_l2",
# "index.index_types": ["flat", "ivf_sq8"],
# "index.nlists": [16384],
# "nprobes": [1, 32, 128, 256, 512],
# "nqs": 10,
# "top_ks": 10,
# "server.use_blas_threshold": 1100,
# "server.cpu_cache_capacity": 64,
# }
]

View File

@ -1,57 +0,0 @@
performance:
# interface: add_vectors
insert:
# index_type: flat/ivf_flat/ivf_sq8/mix_nsg
[
# debug
# data_type / data_size / index_file_size / dimension
# data_type: random / ann_sift
# data_size: 10m / 1b
# {
# "table_name": "random_50m_1024_512_ip",
# "ni_per": 100000,
# "server.cpu_cache_capacity": 16,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data"
# },
# {
# "table_name": "random_2m_1024_512_ip",
# "ni_per": 100000,
# "processes": 5, # multiprocessing
# "server.cpu_cache_capacity": 16,
# "server.resources": ["gpu0", "gpu1"],
# "db_path_prefix": "/test/milvus/db_data/random_5m_1024_512_ip"
# },
# {
# "table_name": "sift_1m_50_128_l2",
# "ni_per": 100000,
# "processes": 5, # multiprocessing
# # "server.cpu_cache_capacity": 16,
# "db_path_prefix": "/test/milvus/db_data"
# },
# {
# "table_name": "sift_1m_256_128_l2",
# "ni_per": 100000,
# "processes": 5, # multiprocessing
# # "server.cpu_cache_capacity": 16,
# "db_path_prefix": "/test/milvus/db_data"
# }
# {
# "table_name": "sift_50m_1024_128_l2",
# "ni_per": 100000,
# "processes": 5, # multiprocessing
# # "server.cpu_cache_capacity": 16,
# },
# {
# "table_name": "sift_100m_1024_128_l2",
# "ni_per": 100000,
# "processes": 5, # multiprocessing
# },
# {
# "table_name": "sift_1b_2048_128_l2",
# "ni_per": 100000,
# "processes": 5, # multiprocessing
# "server.cpu_cache_capacity": 16,
# }
]

View File

@ -1,31 +0,0 @@
stability:
# interface: search_vectors / add_vectors mix operation
query:
[
# {
# "dataset": "random_50m_1024_512_ip",
# "index_type": "ivf_sq8",
# "query_process_num": 10,
# "during_time": 960,
# "server.cpu_cache_capacity": 100,
# "server.use_blas_threshold": 1100,
# "server.use_gpu_threshold": 100,
# "server.resources": ["gpu0", "gpu1"],
# "server.enable_gpu": false,
# "db_path_prefix": "/test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8_stability"
# },
{
"dataset": "random_50m_1024_512_ip",
"index_type": "ivf_sq8",
"query_process_num": 10,
"during_time": 960,
"server.cpu_cache_capacity": 100,
"server.use_blas_threshold": 1100,
"server.use_gpu_threshold": 100,
"server.resources": ["gpu0", "gpu1"],
"server.enable_gpu": false,
"db_path_prefix": "/test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8_stability"
},
]

View File

@ -22,14 +22,12 @@ import h5py
from yaml import full_load, dump
import tableprint as tp
from pprint import pprint
from kubernetes import client, config
logger = logging.getLogger("milvus_benchmark.utils")
config.load_kube_config()
MULTI_DB_SLAVE_PATH = "/opt/milvus/data2;/opt/milvus/data3"
REGISTRY_URL = "registry.zilliz.com/milvus/engine"
def get_unique_name():
return "benchmark-test-"+"".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)).lower()
@ -118,38 +116,88 @@ def update_server_config(file_path, server_config):
# update values.yaml
def update_values(file_path, hostname):
def update_values(file_path, hostname, server_config):
from kubernetes import client, config
client.rest.logger.setLevel(logging.WARNING)
if not os.path.isfile(file_path):
raise Exception('File: %s not found' % file_path)
# bak values.yaml
file_name = os.path.basename(file_path)
bak_file_name = file_name+".bak"
file_parent_path = os.path.dirname(file_path)
bak_file_path = file_parent_path+'/'+bak_file_name
if os.path.exists(bak_file_path):
os.system("cp %s %s" % (bak_file_path, file_path))
else:
os.system("cp %s %s" % (file_path, bak_file_path))
with open(file_path) as f:
values_dict = full_load(f)
f.close()
if values_dict['engine']['nodeSelector']:
logger.warning("nodeSelector has been set: %s" % str(values_dict['engine']['nodeSelector']))
return
for k, v in server_config.items():
if k.find("primary_path") != -1:
values_dict["primaryPath"] = v
values_dict['wal']['path'] = v+"/wal"
elif k.find("use_blas_threshold") != -1:
values_dict['useBLASThreshold'] = int(v)
elif k.find("gpu_search_threshold") != -1:
values_dict['gpuSearchThreshold'] = int(v)
elif k.find("cpu_cache_capacity") != -1:
values_dict['cpuCacheCapacity'] = int(v)
elif k.find("cache_insert_data") != -1:
values_dict['cacheInsertData'] = v
elif k.find("insert_buffer_size") != -1:
values_dict['insertBufferSize'] = v
elif k.find("gpu_resource_config.enable") != -1:
values_dict['gpu']['enabled'] = v
elif k.find("gpu_resource_config.cache_capacity") != -1:
values_dict['gpu']['cacheCapacity'] = int(v)
elif k.find("build_index_resources") != -1:
values_dict['gpu']['buildIndexResources'] = v
elif k.find("search_resources") != -1:
values_dict['gpu']['searchResources'] = v
# wal
elif k.find("auto_flush_interval") != -1:
values_dict['autoFlushInterval'] = v
elif k.find("wal_enable") != -1:
values_dict['wal']['enabled'] = v
# if values_dict['nodeSelector']:
# logger.warning("nodeSelector has been set: %s" % str(values_dict['engine']['nodeSelector']))
# return
values_dict["wal"]["ignoreErrorLog"] = True
# enable monitor
values_dict["metrics"]["enabled"] = True
values_dict["metrics"]["address"] = "192.168.1.237"
values_dict["metrics"]["port"] = 9091
# update values.yaml with the given host
# set limit/request cpus in resources
values_dict['nodeSelector'] = {'kubernetes.io/hostname': hostname}
# Using sqlite
values_dict["mysql"]["enabled"] = False
config.load_kube_config()
v1 = client.CoreV1Api()
node = v1.read_node(hostname)
cpus = node.status.allocatable.get("cpu")
# node = v1.read_node(hostname)
cpus = v1.read_node(hostname).status.allocatable.get("cpu")
# DEBUG
values_dict['engine']['resources'] = {
# set limit/request cpus in resources
values_dict['resources'] = {
"limits": {
"cpu": str(int(cpus)-1)+".0"
"cpu": str(int(cpus))+".0"
},
"requests": {
"cpu": str(int(cpus)-2)+".0"
"cpu": str(int(cpus)-1)+".0"
}
}
values_dict['engine']['nodeSelector'] = {'kubernetes.io/hostname': hostname}
values_dict['engine']['tolerations'].append({
values_dict['tolerations'] = [{
"key": "worker",
"operator": "Equal",
"value": "performance",
"effect": "NoSchedule"
})
}]
# add extra volumes
values_dict['extraVolumes'].append({
values_dict['extraVolumes'] = [{
'name': 'test',
'flexVolume': {
'driver': "fstab/cifs",
@ -162,11 +210,12 @@ def update_values(file_path, hostname):
'mountOptions': "vers=1.0"
}
}
})
values_dict['extraVolumeMounts'].append({
}]
values_dict['extraVolumeMounts'] = [{
'name': 'test',
'mountPath': '/test'
})
}]
logger.debug(values_dict)
with open(file_path, 'w') as f:
dump(values_dict, f, default_flow_style=False)
f.close()
@ -174,22 +223,27 @@ def update_values(file_path, hostname):
# deploy server
def helm_install_server(helm_path, image_tag, image_type, name, namespace):
timeout = 180
install_cmd = "helm install --wait --timeout %d \
--set engine.image.tag=%s \
--set expose.type=clusterIP \
--name %s \
-f ci/db_backend/sqlite_%s_values.yaml \
from kubernetes import client, config
client.rest.logger.setLevel(logging.WARNING)
timeout = 300
install_cmd = "helm install --wait --timeout %ds \
--set image.repository=%s \
--set image.tag=%s \
--set image.pullPolicy=Always \
--set service.type=ClusterIP \
-f ci/filebeat/values.yaml \
--namespace %s \
--version 0.0 ." % (timeout, image_tag, name, image_type, namespace)
%s ." % (timeout, REGISTRY_URL, image_tag, namespace, name)
logger.debug(install_cmd)
if os.system("cd %s && %s" % (helm_path, install_cmd)):
logger.error("Helm install failed")
return None
time.sleep(5)
config.load_kube_config()
v1 = client.CoreV1Api()
host = "%s-milvus-engine.%s.svc.cluster.local" % (name, namespace)
host = "%s.%s.svc.cluster.local" % (name, namespace)
logger.debug(host)
pod_name = None
pod_id = None
pods = v1.list_namespaced_pod(namespace)
@ -203,8 +257,10 @@ def helm_install_server(helm_path, image_tag, image_type, name, namespace):
# delete server
def helm_del_server(name):
del_cmd = "helm del --purge %s" % name
def helm_del_server(name, namespace):
# del_cmd = "helm uninstall -n milvus benchmark-test-gzelwvgk"
# os.system(del_cmd)
del_cmd = "helm uninstall -n milvus %s" % name
logger.debug(del_cmd)
if os.system(del_cmd):
logger.error("Helm delete name:%s failed" % name)
@ -342,4 +398,4 @@ def helm_del_server(name):
if __name__ == '__main__':
# print(pull_image('branch-0.3.1-debug'))
stop_server()
stop_server()

View File

@ -678,6 +678,11 @@ class TestAddBase:
assert status.OK()
class TestAddAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[

View File

@ -234,6 +234,11 @@ class TestFlushBase:
class TestFlushAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
"""
******************************************************************
The following cases are used to test `flush` function

View File

@ -1808,6 +1808,11 @@ class TestCreateIndexParamsInvalid(object):
assert result._index_type == IndexType.FLAT
class TestIndexAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
"""
******************************************************************
The following cases are used to test `create_index` function

View File

@ -1,7 +1,7 @@
import logging
import pytest
__version__ = '0.8.0'
__version__ = '0.9.0'
class TestPing: