[skip ci] Add proxy pod scale test and change ci parallel num (#6856)

* add expand proxy pod test

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>

* test shrink proxy pod

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>

* [skip ci] change parallel_num from auto back to 3

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>
This commit is contained in:
ThreadDao 2021-07-28 17:01:22 +08:00 committed by GitHub
parent e30f2ac210
commit 5bd32c910c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 124 additions and 11 deletions

View File

@ -17,7 +17,7 @@ set -x
MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
MILVUS_CLUSTER_ENABLED="${MILVUS_CLUSTER_ENABLED:-false}"
MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
PARALLEL_NUM="${PARALLEL_NUM:-4}"
PARALLEL_NUM="${PARALLEL_NUM:-3}"
MILVUS_CLIENT="${MILVUS_CLIENT:-pymilvus}"
SOURCE="${BASH_SOURCE[0]}"
@ -68,10 +68,10 @@ pushd "${ROOT}/tests/docker"
else
if [[ "${MILVUS_CLIENT}" == "pymilvus" ]]; then
export MILVUS_PYTEST_WORKSPACE="/milvus/tests/python_test"
docker-compose run --rm pytest /bin/bash -c "pytest -n auto --ip ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} ${@:-}"
docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --ip ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} ${@:-}"
elif [[ "${MILVUS_CLIENT}" == "pymilvus-orm" ]]; then
export MILVUS_PYTEST_WORKSPACE="/milvus/tests20/python_client"
docker-compose run --rm pytest /bin/bash -c "pytest -n auto --host ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} \
docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --host ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} \
--html=\${CI_LOG_PATH}/report.html --self-contained-html ${@:-}"
fi
fi

View File

@ -13,6 +13,7 @@ default_top_k = 10
default_nq = 2
default_limit = 10
default_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
default_index = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
max_top_k = 16384
max_partition_num = 4096 # 256
default_segment_row_limit = 1000

View File

@ -11,4 +11,4 @@ QUERY_NODE = "queryNode"
# my values.yaml path
MILVUS_CHART_ENV = 'MILVUS_CHART_ENV'
MILVUS_CHART = '/home/zong/milvus-helm/charts/milvus'
MILVUS_CHART_PATH = '/home/zong/milvus-helm/charts/milvus'

View File

@ -38,7 +38,10 @@ class HelmEnv:
f'{self.release_name} . '
log.debug(f'install_cmd: {install_cmd}')
log.debug(f'MILVUS CHART: {sc.get_milvus_chart_env_var()}')
os.system(f'cd {sc.get_milvus_chart_env_var()} && {install_cmd}')
try:
os.system(f'cd {sc.get_milvus_chart_env_var()} && {install_cmd}')
except Exception as e:
raise
# raise Exception("Failed to deploy cluster milvus")
# todo
# return svc ip
@ -102,4 +105,4 @@ if __name__ == '__main__':
# env.helm_install_cluster_milvus()
# env.helm_upgrade_cluster_milvus(queryNode=2)
env.helm_uninstall_cluster_milvus()
sleep(5)
sleep(5)

View File

@ -1,7 +1,12 @@
import os
from pymilvus_orm import connections, Index
from scale import constants
from utils.util_log import test_log as log
from base.collection_wrapper import ApiCollectionWrapper
from common import common_func as cf
from common import common_type as ct
def get_milvus_chart_env_var(var=constants.MILVUS_CHART_ENV):
    """Resolve the filesystem path of the milvus helm chart.

    Reads the path from the environment variable named by ``var``; when the
    variable is unset, falls back to ``constants.MILVUS_CHART_PATH``.

    :param var: name of the environment variable holding the chart path
    :return: chart path as a string
    :raises Exception: if the resolved path does not exist on disk
    """
    try:
        milvus_helm_chart = os.environ[var]
        return str(milvus_helm_chart)
    except KeyError as e:
        # env var not set: fall back to the default chart location
        milvus_helm_chart = constants.MILVUS_CHART_PATH
        log.warning(
            f"Failed to get environment variables: {var}, use default: {constants.MILVUS_CHART_PATH}, {str(e)}")
    # fail fast if the fallback chart path is not present locally
    if not os.path.exists(milvus_helm_chart):
        raise Exception(f'milvus_helm_chart: {milvus_helm_chart} not exist')
    return milvus_helm_chart
def e2e_milvus(host, c_name):
    """Run a minimal end-to-end milvus workflow against ``host``.

    Connects, creates collection ``c_name`` with the default schema, inserts
    the default batch, builds the default index, then verifies search and
    query results.
    """
    # establish the default connection to the target instance
    connections.add_connection(default={"host": host, "port": 19530})
    connections.connect(alias='default')

    # create the collection with the default schema
    collection_wrap = ApiCollectionWrapper()
    collection_wrap.init_collection(name=c_name, schema=cf.gen_default_collection_schema())

    # insert the default batch and check the reported row count
    insert_data = cf.gen_default_list_data(ct.default_nb)
    insert_res, _ = collection_wrap.insert(insert_data)
    assert insert_res.insert_count == ct.default_nb

    # build the default index on the float vector field and verify it took effect
    collection_wrap.create_index(ct.default_float_vec_field_name, ct.default_index)
    assert collection_wrap.has_index()[0]
    assert collection_wrap.index()[0] == Index(collection_wrap.collection, ct.default_float_vec_field_name,
                                               ct.default_index)

    # load and search; expect exactly the default number of hits
    collection_wrap.load()
    hits, _ = collection_wrap.search(insert_data[-1][:ct.default_nq], ct.default_float_vec_field_name,
                                     ct.default_search_params, ct.default_limit)
    assert len(hits[0]) == ct.default_limit

    # query the first hit back by primary key and check it round-trips
    hit_id = hits[0].ids[0]
    expr = f'{ct.default_int64_field_name} in [{hit_id}]'
    query_result, _ = collection_wrap.query(expr, output_fields=["*", "%"])
    assert query_result[0][ct.default_int64_field_name] == hit_id

View File

@ -29,9 +29,10 @@ class TestIndexNodeScale:
release_name = "scale-index"
env = HelmEnv(release_name=release_name)
env.helm_install_cluster_milvus()
host = env.get_svc_external_ip()
# connect
connections.add_connection(default={"host": '10.98.0.8', "port": 19530})
connections.add_connection(default={"host": host, "port": 19530})
connections.connect(alias='default')
data = cf.gen_default_dataframe_data(nb)

View File

@ -0,0 +1,64 @@
import pytest
from scale.helm_env import HelmEnv
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel
from scale import scale_common as sc
from scale import constants
prefix = "proxy_scale"
class TestProxyScale:
    """Scale tests for the proxy node: expand and shrink pod count."""

    @pytest.mark.tags(CaseLabel.L3)
    def test_expand_proxy(self):
        """
        target: test milvus operation after proxy expand
        method: 1.deploy one proxy pod
                2.milvus e2e test
                3.expand proxy pod from 1 to 2
                4.milvus e2e test
        expected: 1.verify data consistent and func work
        """
        # deploy a cluster milvus with one pod per node type
        release_name = "scale-proxy"
        env = HelmEnv(release_name=release_name)
        env.helm_install_cluster_milvus()
        host = env.get_svc_external_ip()
        c_name = cf.gen_unique_str(prefix)
        sc.e2e_milvus(host, c_name)
        # scale proxy from 1 to 2 pods and re-run the e2e workflow
        env.helm_upgrade_cluster_milvus(proxy=2)
        c_name_2 = cf.gen_unique_str(prefix)
        sc.e2e_milvus(host, c_name_2)

    # NOTE(review): decorator added for consistency with test_expand_proxy,
    # which carries the same L3 tag.
    @pytest.mark.tags(CaseLabel.L3)
    def test_shrink_proxy(self):
        """
        target: test shrink proxy pod from 2 to 1
        method: 1.deploy two proxy pods
                2.e2e test
                3.shrink proxy pods from 2 to 1
                4.e2e test
        expected: milvus operations remain functional after the shrink
        """
        # deploy cluster milvus with 2 proxy pods
        release_name = "scale-proxy"
        env = HelmEnv(release_name=release_name, proxy=2)
        env.helm_install_cluster_milvus()
        host = env.get_svc_external_ip()
        c_name = cf.gen_unique_str(prefix)
        sc.e2e_milvus(host, c_name)
        # scale proxy back down from 2 to 1 pod and re-run the e2e workflow
        env.helm_upgrade_cluster_milvus(proxy=1)
        c_name_2 = cf.gen_unique_str(prefix)
        sc.e2e_milvus(host, c_name_2)

View File

@ -27,9 +27,10 @@ class TestQueryNodeScale:
release_name = "scale-query"
env = HelmEnv(release_name=release_name)
env.helm_install_cluster_milvus()
host = env.get_svc_external_ip()
# connect
connections.add_connection(default={"host": '10.98.0.8', "port": 19530})
connections.add_connection(default={"host": host, "port": 19530})
connections.connect(alias='default')
# create